1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
14 #include "xfs_mount.h"
15 #include "xfs_inode.h"
16 #include "xfs_btree.h"
17 #include "xfs_ialloc.h"
18 #include "xfs_ialloc_btree.h"
19 #include "xfs_alloc.h"
20 #include "xfs_errortag.h"
21 #include "xfs_error.h"
23 #include "xfs_trans.h"
24 #include "xfs_buf_item.h"
25 #include "xfs_icreate_item.h"
26 #include "xfs_icache.h"
27 #include "xfs_trace.h"
32 * Lookup a record by ino in the btree given by cur.
36 struct xfs_btree_cur *cur, /* btree cursor */
37 xfs_agino_t ino, /* starting inode of chunk */
38 xfs_lookup_t dir, /* <=, >=, == */
39 int *stat) /* success/failure */
41 cur->bc_rec.i.ir_startino = ino;
42 cur->bc_rec.i.ir_holemask = 0;
43 cur->bc_rec.i.ir_count = 0;
44 cur->bc_rec.i.ir_freecount = 0;
45 cur->bc_rec.i.ir_free = 0;
46 return xfs_btree_lookup(cur, dir, stat);
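/*
 * Illustrative usage (not from the original source; variable names are
 * hypothetical): a caller typically positions the cursor at or before a
 * given inode, e.g.
 *
 *	error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &stat);
 *
 * On return, stat is 1 if a matching record was found and 0 otherwise.
 * Only ir_startino acts as the lookup key, which is why the other incore
 * fields are zeroed above.
 */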
50 * Update the record referred to by cur to the value given.
51 * This either works (return 0) or gets an EFSCORRUPTED error.
53 STATIC int /* error */
55 struct xfs_btree_cur *cur, /* btree cursor */
56 xfs_inobt_rec_incore_t *irec) /* btree record */
58 union xfs_btree_rec rec;
60 rec.inobt.ir_startino = cpu_to_be32(irec->ir_startino);
61 if (xfs_sb_version_hassparseinodes(&cur->bc_mp->m_sb)) {
62 rec.inobt.ir_u.sp.ir_holemask = cpu_to_be16(irec->ir_holemask);
63 rec.inobt.ir_u.sp.ir_count = irec->ir_count;
64 rec.inobt.ir_u.sp.ir_freecount = irec->ir_freecount;
66 /* ir_holemask/ir_count not supported on-disk */
67 rec.inobt.ir_u.f.ir_freecount = cpu_to_be32(irec->ir_freecount);
69 rec.inobt.ir_free = cpu_to_be64(irec->ir_free);
70 return xfs_btree_update(cur, &rec);
73 /* Convert on-disk btree record to incore inobt record. */
75 xfs_inobt_btrec_to_irec(
77 union xfs_btree_rec *rec,
78 struct xfs_inobt_rec_incore *irec)
80 irec->ir_startino = be32_to_cpu(rec->inobt.ir_startino);
81 if (xfs_sb_version_hassparseinodes(&mp->m_sb)) {
82 irec->ir_holemask = be16_to_cpu(rec->inobt.ir_u.sp.ir_holemask);
83 irec->ir_count = rec->inobt.ir_u.sp.ir_count;
84 irec->ir_freecount = rec->inobt.ir_u.sp.ir_freecount;
87 * ir_holemask/ir_count not supported on-disk. Fill in hardcoded
88 * values for full inode chunks.
90 irec->ir_holemask = XFS_INOBT_HOLEMASK_FULL;
91 irec->ir_count = XFS_INODES_PER_CHUNK;
93 be32_to_cpu(rec->inobt.ir_u.f.ir_freecount);
95 irec->ir_free = be64_to_cpu(rec->inobt.ir_free);
99 * Get the data from the pointed-to record.
103 struct xfs_btree_cur *cur,
104 struct xfs_inobt_rec_incore *irec,
107 struct xfs_mount *mp = cur->bc_mp;
108 xfs_agnumber_t agno = cur->bc_private.a.agno;
109 union xfs_btree_rec *rec;
113 error = xfs_btree_get_rec(cur, &rec, stat);
114 if (error || *stat == 0)
117 xfs_inobt_btrec_to_irec(mp, rec, irec);
119 if (!xfs_verify_agino(mp, agno, irec->ir_startino))
121 if (irec->ir_count < XFS_INODES_PER_HOLEMASK_BIT ||
122 irec->ir_count > XFS_INODES_PER_CHUNK)
124 if (irec->ir_freecount > XFS_INODES_PER_CHUNK)
127 /* mask the free bits down to allocated regions to validate ir_freecount */
128 if (!xfs_inobt_issparse(irec->ir_holemask))
129 realfree = irec->ir_free;
131 realfree = irec->ir_free & xfs_inobt_irec_to_allocmask(irec);
132 if (hweight64(realfree) != irec->ir_freecount)
139 "%s Inode BTree record corruption in AG %d detected!",
140 cur->bc_btnum == XFS_BTNUM_INO ? "Used" : "Free", agno);
142 "start inode 0x%x, count 0x%x, free 0x%x freemask 0x%llx, holemask 0x%x",
143 irec->ir_startino, irec->ir_count, irec->ir_freecount,
144 irec->ir_free, irec->ir_holemask);
145 return -EFSCORRUPTED;
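/*
 * Worked example of the free-count check above (illustrative values): a
 * sparse record with ir_holemask = 0xff00 has holes over inodes 32-63, so
 * xfs_inobt_irec_to_allocmask() yields a mask covering inodes 0-31 only.
 * Any ir_free bits above inode 31 are masked off, and the record is
 * rejected unless hweight64(realfree) equals ir_freecount.
 */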
149 * Insert a single inobt record. Cursor must already point to desired location.
152 xfs_inobt_insert_rec(
153 struct xfs_btree_cur *cur,
160 cur->bc_rec.i.ir_holemask = holemask;
161 cur->bc_rec.i.ir_count = count;
162 cur->bc_rec.i.ir_freecount = freecount;
163 cur->bc_rec.i.ir_free = free;
164 return xfs_btree_insert(cur, stat);
168 * Insert records describing a newly allocated inode chunk into the inobt.
172 struct xfs_mount *mp,
173 struct xfs_trans *tp,
174 struct xfs_buf *agbp,
179 struct xfs_btree_cur *cur;
180 struct xfs_agi *agi = XFS_BUF_TO_AGI(agbp);
181 xfs_agnumber_t agno = be32_to_cpu(agi->agi_seqno);
186 cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, btnum);
188 for (thisino = newino;
189 thisino < newino + newlen;
190 thisino += XFS_INODES_PER_CHUNK) {
191 error = xfs_inobt_lookup(cur, thisino, XFS_LOOKUP_EQ, &i);
193 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
198 error = xfs_inobt_insert_rec(cur, XFS_INOBT_HOLEMASK_FULL,
199 XFS_INODES_PER_CHUNK,
200 XFS_INODES_PER_CHUNK,
201 XFS_INOBT_ALL_FREE, &i);
203 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
209 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
215 * Verify that the number of free inodes in the AGI is correct.
219 xfs_check_agi_freecount(
220 struct xfs_btree_cur *cur,
223 if (cur->bc_nlevels == 1) {
224 xfs_inobt_rec_incore_t rec;
229 error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);
234 error = xfs_inobt_get_rec(cur, &rec, &i);
239 freecount += rec.ir_freecount;
240 error = xfs_btree_increment(cur, 0, &i);
246 if (!XFS_FORCED_SHUTDOWN(cur->bc_mp))
247 ASSERT(freecount == be32_to_cpu(agi->agi_freecount));
252 #define xfs_check_agi_freecount(cur, agi) 0
256 * Initialise a new set of inodes. When called without a transaction context
257 * (e.g. from recovery) we initiate a delayed write of the inode buffers rather
258 * than logging them (which in a transaction context puts them into the AIL
259 * for writeback rather than the xfsbufd queue).
262 xfs_ialloc_inode_init(
263 struct xfs_mount *mp,
264 struct xfs_trans *tp,
265 struct list_head *buffer_list,
269 xfs_agblock_t length,
272 struct xfs_buf *fbuf;
273 struct xfs_dinode *free;
281 * Loop over the new block(s), filling in the inodes. For small block
282 * sizes, manipulate the inodes in buffers which are multiples of the
285 nbufs = length / M_IGEO(mp)->blocks_per_cluster;
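/*
 * For example (hypothetical geometry): a 16 block inode chunk with 4
 * blocks per inode cluster is initialised as 4 cluster-sized buffers.
 */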
288 * Figure out what version number to use in the inodes we create. If
289 * the superblock version has caught up to the one that supports the new
290 * inode format, then use the new inode version. Otherwise use the old
291 * version so that old kernels will continue to be able to use the file
294 * For v3 inodes, we also need to write the inode number into the inode,
295 * so calculate the first inode number of the chunk here as
296 * XFS_AGB_TO_AGINO() only works within a filesystem block, not
297 * across multiple filesystem blocks (such as a cluster) and so cannot
298 * be used in the cluster buffer loop below.
300 * Further, because we are writing the inode directly into the buffer
301 * and calculating a CRC on the entire inode, we have to log the entire
302 * inode so that the entire range the CRC covers is present in the log.
303 * That means for v3 inode we log the entire buffer rather than just the
306 if (xfs_sb_version_hascrc(&mp->m_sb)) {
308 ino = XFS_AGINO_TO_INO(mp, agno, XFS_AGB_TO_AGINO(mp, agbno));
311 * log the initialisation that is about to take place as a
312 * logical operation. This means the transaction does not
313 * need to log the physical changes to the inode buffers as log
314 * recovery will know what initialisation is actually needed.
315 * Hence we only need to log the buffers as "ordered" buffers so
316 * they track in the AIL as if they were physically logged.
319 xfs_icreate_log(tp, agno, agbno, icount,
320 mp->m_sb.sb_inodesize, length, gen);
324 for (j = 0; j < nbufs; j++) {
328 d = XFS_AGB_TO_DADDR(mp, agno, agbno +
329 (j * M_IGEO(mp)->blocks_per_cluster));
330 fbuf = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
332 M_IGEO(mp)->blocks_per_cluster,
337 /* Initialize the inode buffers and log them appropriately. */
338 fbuf->b_ops = &xfs_inode_buf_ops;
339 xfs_buf_zero(fbuf, 0, BBTOB(fbuf->b_length));
340 for (i = 0; i < M_IGEO(mp)->inodes_per_cluster; i++) {
341 int ioffset = i << mp->m_sb.sb_inodelog;
342 uint isize = xfs_dinode_size(version);
344 free = xfs_make_iptr(mp, fbuf, i);
345 free->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
346 free->di_version = version;
347 free->di_gen = cpu_to_be32(gen);
348 free->di_next_unlinked = cpu_to_be32(NULLAGINO);
351 free->di_ino = cpu_to_be64(ino);
353 uuid_copy(&free->di_uuid,
354 &mp->m_sb.sb_meta_uuid);
355 xfs_dinode_calc_crc(mp, free);
357 /* just log the inode core */
358 xfs_trans_log_buf(tp, fbuf, ioffset,
359 ioffset + isize - 1);
365 * Mark the buffer as an inode allocation buffer so it
366 * sticks in the AIL at the point of this allocation
367 * transaction. This ensures they are on disk before
368 * the tail of the log can be moved past this
369 * transaction (i.e. by preventing relogging from moving
370 * it forward in the log).
372 xfs_trans_inode_alloc_buf(tp, fbuf);
375 * Mark the buffer as ordered so that it is not
376 * physically logged in the transaction but is
377 * still tracked in the AIL as part of the
378 * transaction and pins the log appropriately.
380 xfs_trans_ordered_buf(tp, fbuf);
383 fbuf->b_flags |= XBF_DONE;
384 xfs_buf_delwri_queue(fbuf, buffer_list);
392 * Align startino and allocmask for a recently allocated sparse chunk such that
393 * they are fit for insertion (or merge) into the on-disk inode btrees.
397 * When enabled, sparse inode support increases the inode alignment from cluster
398 * size to inode chunk size. This means that the minimum range between two
399 * non-adjacent inode records in the inobt is large enough for a full inode
400 * record. This allows for cluster sized, cluster aligned block allocation
401 * without need to worry about whether the resulting inode record overlaps with
402 * another record in the tree. Without this basic rule, we would have to deal
403 * with the consequences of overlap by potentially undoing recent allocations in
404 * the inode allocation codepath.
406 * Because of this alignment rule (which is enforced on mount), there are two
407 * inobt possibilities for newly allocated sparse chunks. One is that the
408 * aligned inode record for the chunk covers a range of inodes not already
409 * covered in the inobt (i.e., it is safe to insert a new sparse record). The
410 * other is that a record already exists at the aligned startino that considers
411 * the newly allocated range as sparse. In the latter case, record content is
412 * merged in the hope that sparse inode chunks fill to full chunks over time.
415 xfs_align_sparse_ino(
416 struct xfs_mount *mp,
417 xfs_agino_t *startino,
424 agbno = XFS_AGINO_TO_AGBNO(mp, *startino);
425 mod = agbno % mp->m_sb.sb_inoalignmt;
429 /* calculate the inode offset and align startino */
430 offset = XFS_AGB_TO_AGINO(mp, mod);
434 * Since startino has been aligned down, left shift allocmask such that
435 * it continues to represent the same physical inodes relative to the
438 *allocmask <<= offset / XFS_INODES_PER_HOLEMASK_BIT;
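/*
 * Worked example (hypothetical values, not from the original source):
 * with sb_inoalignmt = 4 blocks and 16 inodes per block, a sparse
 * allocation starting at agbno 6 gives mod = 2, i.e. an offset of 32
 * inodes. startino is aligned down by 32 and allocmask is shifted left
 * by 32 / XFS_INODES_PER_HOLEMASK_BIT = 8 bits so it still describes
 * the same physical inodes within the aligned record.
 */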
442 * Determine whether the source inode record can merge into the target. Both
443 * records must be sparse, the inode ranges must match and there must be no
444 * allocation overlap between the records.
447 __xfs_inobt_can_merge(
448 struct xfs_inobt_rec_incore *trec, /* tgt record */
449 struct xfs_inobt_rec_incore *srec) /* src record */
454 /* records must cover the same inode range */
455 if (trec->ir_startino != srec->ir_startino)
458 /* both records must be sparse */
459 if (!xfs_inobt_issparse(trec->ir_holemask) ||
460 !xfs_inobt_issparse(srec->ir_holemask))
463 /* both records must track some inodes */
464 if (!trec->ir_count || !srec->ir_count)
467 /* can't exceed capacity of a full record */
468 if (trec->ir_count + srec->ir_count > XFS_INODES_PER_CHUNK)
471 /* verify there is no allocation overlap */
472 talloc = xfs_inobt_irec_to_allocmask(trec);
473 salloc = xfs_inobt_irec_to_allocmask(srec);
481 * Merge the source inode record into the target. The caller must call
482 * __xfs_inobt_can_merge() to ensure the merge is valid.
485 __xfs_inobt_rec_merge(
486 struct xfs_inobt_rec_incore *trec, /* target */
487 struct xfs_inobt_rec_incore *srec) /* src */
489 ASSERT(trec->ir_startino == srec->ir_startino);
491 /* combine the counts */
492 trec->ir_count += srec->ir_count;
493 trec->ir_freecount += srec->ir_freecount;
496 * Merge the holemask and free mask. For both fields, 0 bits refer to
497 * allocated inodes. We combine the allocated ranges with bitwise AND.
499 trec->ir_holemask &= srec->ir_holemask;
500 trec->ir_free &= srec->ir_free;
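/*
 * Worked example (illustrative): merging a target with ir_holemask
 * 0xff00 (inodes 0-31 allocated) and a source with ir_holemask 0x00ff
 * (inodes 32-63 allocated) yields 0xff00 & 0x00ff = 0, i.e. a fully
 * allocated chunk. The free masks AND together the same way: each
 * record keeps its own hole regions marked free, so the result reflects
 * the real free state of whichever half each inode came from.
 */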
504 * Insert a new sparse inode chunk into the associated inode btree. The inode
505 * record for the sparse chunk is pre-aligned to a startino that should match
506 * any pre-existing sparse inode record in the tree. This allows sparse chunks
509 * This function supports two modes of handling preexisting records depending on
510 * the merge flag. If merge is true, the provided record is merged with the
511 * existing record and updated in place. The merged record is returned in nrec.
512 * If merge is false, an existing record is replaced with the provided record.
513 * If no preexisting record exists, the provided record is always inserted.
515 * It is considered corruption if a merge is requested and not possible. Given
516 * the sparse inode alignment constraints, this should never happen.
519 xfs_inobt_insert_sprec(
520 struct xfs_mount *mp,
521 struct xfs_trans *tp,
522 struct xfs_buf *agbp,
524 struct xfs_inobt_rec_incore *nrec, /* in/out: new/merged rec. */
525 bool merge) /* merge or replace */
527 struct xfs_btree_cur *cur;
528 struct xfs_agi *agi = XFS_BUF_TO_AGI(agbp);
529 xfs_agnumber_t agno = be32_to_cpu(agi->agi_seqno);
532 struct xfs_inobt_rec_incore rec;
534 cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, btnum);
536 /* the new record is pre-aligned so we know where to look */
537 error = xfs_inobt_lookup(cur, nrec->ir_startino, XFS_LOOKUP_EQ, &i);
540 /* if nothing there, insert a new record and return */
542 error = xfs_inobt_insert_rec(cur, nrec->ir_holemask,
543 nrec->ir_count, nrec->ir_freecount,
547 if (XFS_IS_CORRUPT(mp, i != 1)) {
548 error = -EFSCORRUPTED;
556 * A record exists at this startino. Merge or replace the record
557 * depending on what we've been asked to do.
560 error = xfs_inobt_get_rec(cur, &rec, &i);
563 if (XFS_IS_CORRUPT(mp, i != 1)) {
564 error = -EFSCORRUPTED;
567 if (XFS_IS_CORRUPT(mp, rec.ir_startino != nrec->ir_startino)) {
568 error = -EFSCORRUPTED;
573 * This should never fail. If we have coexisting records that
574 * cannot merge, something is seriously wrong.
576 if (XFS_IS_CORRUPT(mp, !__xfs_inobt_can_merge(nrec, &rec))) {
577 error = -EFSCORRUPTED;
581 trace_xfs_irec_merge_pre(mp, agno, rec.ir_startino,
582 rec.ir_holemask, nrec->ir_startino,
585 /* merge to nrec to output the updated record */
586 __xfs_inobt_rec_merge(nrec, &rec);
588 trace_xfs_irec_merge_post(mp, agno, nrec->ir_startino,
591 error = xfs_inobt_rec_check_count(mp, nrec);
596 error = xfs_inobt_update(cur, nrec);
601 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
604 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
609 * Allocate new inodes in the allocation group specified by agbp.
610 * Return 0 for success, else error code.
614 struct xfs_trans *tp,
615 struct xfs_buf *agbp,
619 struct xfs_alloc_arg args;
622 xfs_agino_t newino; /* new first inode's number */
623 xfs_agino_t newlen; /* new number of inodes */
624 int isaligned = 0; /* inode allocation at stripe */
626 /* init. to full chunk */
627 uint16_t allocmask = (uint16_t) -1;
628 struct xfs_inobt_rec_incore rec;
629 struct xfs_perag *pag;
630 struct xfs_ino_geometry *igeo = M_IGEO(tp->t_mountp);
633 memset(&args, 0, sizeof(args));
635 args.mp = tp->t_mountp;
636 args.fsbno = NULLFSBLOCK;
637 args.oinfo = XFS_RMAP_OINFO_INODES;
640 /* randomly do sparse inode allocations */
641 if (xfs_sb_version_hassparseinodes(&tp->t_mountp->m_sb) &&
642 igeo->ialloc_min_blks < igeo->ialloc_blks)
643 do_sparse = prandom_u32() & 1;
647 * Locking will ensure that we don't have two callers in here
650 newlen = igeo->ialloc_inos;
651 if (igeo->maxicount &&
652 percpu_counter_read_positive(&args.mp->m_icount) + newlen >
655 args.minlen = args.maxlen = igeo->ialloc_blks;
657 * First try to allocate inodes contiguous with the last-allocated
658 * chunk of inodes. If the filesystem is striped, this will fill
659 * an entire stripe unit with inodes.
661 agi = XFS_BUF_TO_AGI(agbp);
662 newino = be32_to_cpu(agi->agi_newino);
663 agno = be32_to_cpu(agi->agi_seqno);
664 args.agbno = XFS_AGINO_TO_AGBNO(args.mp, newino) +
668 if (likely(newino != NULLAGINO &&
669 (args.agbno < be32_to_cpu(agi->agi_length)))) {
670 args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
671 args.type = XFS_ALLOCTYPE_THIS_BNO;
675 * We need to take into account alignment here to ensure that
676 * we don't modify the free list if we fail to have an exact
677 * block. If we don't have an exact match, and every other
678 * allocation attempt fails, we'll end up cancelling
679 * a dirty transaction and shutting down.
681 * For an exact allocation, alignment must be 1,
682 * however we need to take cluster alignment into account when
683 * fixing up the freelist. Use the minalignslop field to
684 * indicate that extra blocks might be required for alignment,
685 * but not to use them in the actual exact allocation.
688 args.minalignslop = igeo->cluster_align - 1;
690 /* Allow space for the inode btree to split. */
691 args.minleft = igeo->inobt_maxlevels - 1;
692 if ((error = xfs_alloc_vextent(&args)))
696 * This request might have dirtied the transaction if the AG can
697 * satisfy the request, but the exact block was not available.
698 * If the allocation did fail, subsequent requests will relax
699 * the exact agbno requirement and increase the alignment
700 * instead. It is critical that the total size of the request
701 * (len + alignment + slop) does not increase from this point
702 * on, so reset minalignslop to ensure it is not included in
703 * subsequent requests.
705 args.minalignslop = 0;
708 if (unlikely(args.fsbno == NULLFSBLOCK)) {
710 * Set the alignment for the allocation.
711 * If stripe alignment is turned on then align at stripe unit
713 * If the cluster size is smaller than a filesystem block
714 * then we're doing I/O for inodes in filesystem block size
715 * pieces, so don't need alignment anyway.
718 if (igeo->ialloc_align) {
719 ASSERT(!(args.mp->m_flags & XFS_MOUNT_NOALIGN));
720 args.alignment = args.mp->m_dalign;
723 args.alignment = igeo->cluster_align;
725 * Need to figure out where to allocate the inode blocks.
726 * Ideally they should be spaced out through the a.g.
727 * For now, just allocate blocks up front.
729 args.agbno = be32_to_cpu(agi->agi_root);
730 args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
732 * Allocate a fixed-size extent of inodes.
734 args.type = XFS_ALLOCTYPE_NEAR_BNO;
737 * Allow space for the inode btree to split.
739 args.minleft = igeo->inobt_maxlevels - 1;
740 if ((error = xfs_alloc_vextent(&args)))
745 * If stripe alignment is turned on, then try again with cluster
748 if (isaligned && args.fsbno == NULLFSBLOCK) {
749 args.type = XFS_ALLOCTYPE_NEAR_BNO;
750 args.agbno = be32_to_cpu(agi->agi_root);
751 args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
752 args.alignment = igeo->cluster_align;
753 if ((error = xfs_alloc_vextent(&args)))
758 * Finally, try a sparse allocation if the filesystem supports it and
759 * the sparse allocation length is smaller than a full chunk.
761 if (xfs_sb_version_hassparseinodes(&args.mp->m_sb) &&
762 igeo->ialloc_min_blks < igeo->ialloc_blks &&
763 args.fsbno == NULLFSBLOCK) {
765 args.type = XFS_ALLOCTYPE_NEAR_BNO;
766 args.agbno = be32_to_cpu(agi->agi_root);
767 args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
768 args.alignment = args.mp->m_sb.sb_spino_align;
771 args.minlen = igeo->ialloc_min_blks;
772 args.maxlen = args.minlen;
775 * The inode record will be aligned to full chunk size. We must
776 * prevent sparse allocation from AG boundaries that result in
777 * invalid inode records, such as records that start at agbno 0
778 * or extend beyond the AG.
780 * Set min agbno to the first aligned, non-zero agbno and max to
781 * the last aligned agbno that is at least one full chunk from
784 args.min_agbno = args.mp->m_sb.sb_inoalignmt;
785 args.max_agbno = round_down(args.mp->m_sb.sb_agblocks,
786 args.mp->m_sb.sb_inoalignmt) -
789 error = xfs_alloc_vextent(&args);
793 newlen = XFS_AGB_TO_AGINO(args.mp, args.len);
794 ASSERT(newlen <= XFS_INODES_PER_CHUNK);
795 allocmask = (1 << (newlen / XFS_INODES_PER_HOLEMASK_BIT)) - 1;
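/*
 * For example (illustrative): a sparse allocation of newlen = 32 inodes
 * gives allocmask = (1 << (32 / 4)) - 1 = 0xff, i.e. the low eight
 * holemask regions are allocated; ir_holemask below is the bitwise
 * inverse, marking the upper half of the chunk as a hole.
 */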
798 if (args.fsbno == NULLFSBLOCK) {
802 ASSERT(args.len == args.minlen);
805 * Stamp and write the inode buffers.
807 * Seed the new inode cluster with a random generation number. This
808 * prevents short-term reuse of generation numbers if a chunk is
809 * freed and then immediately reallocated. We use random numbers
810 * rather than a linear progression to prevent the next generation
811 * number from being easily guessable.
813 error = xfs_ialloc_inode_init(args.mp, tp, NULL, newlen, agno,
814 args.agbno, args.len, prandom_u32());
819 * Convert the results.
821 newino = XFS_AGB_TO_AGINO(args.mp, args.agbno);
823 if (xfs_inobt_issparse(~allocmask)) {
825 * We've allocated a sparse chunk. Align the startino and mask.
827 xfs_align_sparse_ino(args.mp, &newino, &allocmask);
829 rec.ir_startino = newino;
830 rec.ir_holemask = ~allocmask;
831 rec.ir_count = newlen;
832 rec.ir_freecount = newlen;
833 rec.ir_free = XFS_INOBT_ALL_FREE;
836 * Insert the sparse record into the inobt and allow for a merge
837 * if necessary. If a merge does occur, rec is updated to the
840 error = xfs_inobt_insert_sprec(args.mp, tp, agbp, XFS_BTNUM_INO,
842 if (error == -EFSCORRUPTED) {
844 "invalid sparse inode record: ino 0x%llx holemask 0x%x count %u",
845 XFS_AGINO_TO_INO(args.mp, agno,
847 rec.ir_holemask, rec.ir_count);
848 xfs_force_shutdown(args.mp, SHUTDOWN_CORRUPT_INCORE);
854 * We can't merge the part we've just allocated as we did for the inobt
855 * due to finobt semantics. The original record may or may not
856 * exist independent of whether physical inodes exist in this
859 * We must update the finobt record based on the inobt record.
860 * rec contains the fully merged and up to date inobt record
861 * from the previous call. Set merge false to replace any
862 * existing record with this one.
864 if (xfs_sb_version_hasfinobt(&args.mp->m_sb)) {
865 error = xfs_inobt_insert_sprec(args.mp, tp, agbp,
866 XFS_BTNUM_FINO, &rec,
872 /* full chunk - insert new records to both btrees */
873 error = xfs_inobt_insert(args.mp, tp, agbp, newino, newlen,
878 if (xfs_sb_version_hasfinobt(&args.mp->m_sb)) {
879 error = xfs_inobt_insert(args.mp, tp, agbp, newino,
880 newlen, XFS_BTNUM_FINO);
887 * Update AGI counts and newino.
889 be32_add_cpu(&agi->agi_count, newlen);
890 be32_add_cpu(&agi->agi_freecount, newlen);
891 pag = xfs_perag_get(args.mp, agno);
892 pag->pagi_freecount += newlen;
893 pag->pagi_count += newlen;
895 agi->agi_newino = cpu_to_be32(newino);
898 * Log allocation group header fields
900 xfs_ialloc_log_agi(tp, agbp,
901 XFS_AGI_COUNT | XFS_AGI_FREECOUNT | XFS_AGI_NEWINO);
903 * Modify/log superblock values for inode count and inode free count.
905 xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, (long)newlen);
906 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, (long)newlen);
911 STATIC xfs_agnumber_t
917 spin_lock(&mp->m_agirotor_lock);
918 agno = mp->m_agirotor;
919 if (++mp->m_agirotor >= mp->m_maxagi)
921 spin_unlock(&mp->m_agirotor_lock);
927 * Select an allocation group to look for a free inode in, based on the parent
928 * inode and the mode. Return the allocation group buffer.
930 STATIC xfs_agnumber_t
931 xfs_ialloc_ag_select(
932 xfs_trans_t *tp, /* transaction pointer */
933 xfs_ino_t parent, /* parent directory inode number */
934 umode_t mode) /* bits set to indicate file type */
936 xfs_agnumber_t agcount; /* number of ag's in the filesystem */
937 xfs_agnumber_t agno; /* current ag number */
938 int flags; /* alloc buffer locking flags */
939 xfs_extlen_t ineed; /* blocks needed for inode allocation */
940 xfs_extlen_t longest = 0; /* longest extent available */
941 xfs_mount_t *mp; /* mount point structure */
942 int needspace; /* file mode implies space allocated */
943 xfs_perag_t *pag; /* per allocation group data */
944 xfs_agnumber_t pagno; /* parent (starting) ag number */
948 * Files of these types need at least one block if length > 0
949 * (and they won't fit in the inode, but that's hard to figure out).
951 needspace = S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode);
953 agcount = mp->m_maxagi;
955 pagno = xfs_ialloc_next_ag(mp);
957 pagno = XFS_INO_TO_AGNO(mp, parent);
958 if (pagno >= agcount)
962 ASSERT(pagno < agcount);
965 * Loop through allocation groups, looking for one with a little
966 * free space in it. Note we don't look for free inodes, exactly.
967 * Instead, if there is a need to allocate inodes, we take that
968 * to mean that blocks must be allocated for them
969 * if none are currently free.
972 flags = XFS_ALLOC_FLAG_TRYLOCK;
974 pag = xfs_perag_get(mp, agno);
975 if (!pag->pagi_inodeok) {
976 xfs_ialloc_next_ag(mp);
980 if (!pag->pagi_init) {
981 error = xfs_ialloc_pagi_init(mp, tp, agno);
986 if (pag->pagi_freecount) {
991 if (!pag->pagf_init) {
992 error = xfs_alloc_pagf_init(mp, tp, agno, flags);
998 * Check that there is enough free space for the file plus a
999 * chunk of inodes if we need to allocate some. If this is the
1000 * first pass across the AGs, take into account the potential
1001 * space needed for alignment of inode chunks when checking the
1002 * longest contiguous free space in the AG - this prevents us
1003 * from getting ENOSPC because we have free space larger than
1004 * ialloc_blks but alignment constraints prevent us from using
1007 * If we can't find an AG with space for full alignment slack to
1008 * be taken into account, we must be near ENOSPC in all AGs.
1009 * Hence we don't include alignment for the second pass and so
1010 * if we fail allocation due to alignment issues then it is most
1011 * likely a real ENOSPC condition.
1013 ineed = M_IGEO(mp)->ialloc_min_blks;
1014 if (flags && ineed > 1)
1015 ineed += M_IGEO(mp)->cluster_align;
1016 longest = pag->pagf_longest;
1018 longest = pag->pagf_flcount > 0;
1020 if (pag->pagf_freeblks >= needspace + ineed &&
1028 * No point in iterating over the rest, if we're shutting
1031 if (XFS_FORCED_SHUTDOWN(mp))
1032 return NULLAGNUMBER;
1034 if (agno >= agcount)
1036 if (agno == pagno) {
1038 return NULLAGNUMBER;
1045 * Try to retrieve the next record to the left/right from the current one.
1048 xfs_ialloc_next_rec(
1049 struct xfs_btree_cur *cur,
1050 xfs_inobt_rec_incore_t *rec,
1058 error = xfs_btree_decrement(cur, 0, &i);
1060 error = xfs_btree_increment(cur, 0, &i);
1066 error = xfs_inobt_get_rec(cur, rec, &i);
1069 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
1070 return -EFSCORRUPTED;
1078 struct xfs_btree_cur *cur,
1080 xfs_inobt_rec_incore_t *rec,
1086 error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_EQ, &i);
1091 error = xfs_inobt_get_rec(cur, rec, &i);
1094 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
1095 return -EFSCORRUPTED;
1102 * Return the offset of the first free inode in the record. If the inode chunk
1103 * is sparsely allocated, we convert the record holemask to inode granularity
1104 * and mask off the unallocated regions from the inode free mask.
1107 xfs_inobt_first_free_inode(
1108 struct xfs_inobt_rec_incore *rec)
1110 xfs_inofree_t realfree;
1112 /* if there are no holes, return the first available offset */
1113 if (!xfs_inobt_issparse(rec->ir_holemask))
1114 return xfs_lowbit64(rec->ir_free);
1116 realfree = xfs_inobt_irec_to_allocmask(rec);
1117 realfree &= rec->ir_free;
1119 return xfs_lowbit64(realfree);
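/*
 * Illustrative example: with ir_holemask = 0x00ff the low 32 inodes are
 * a hole, so the allocmask only covers inodes 32-63; any stray free bits
 * below inode 32 are masked off before xfs_lowbit64() picks the first
 * genuinely allocatable inode.
 */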
1123 * Allocate an inode using the inobt-only algorithm.
1126 xfs_dialloc_ag_inobt(
1127 struct xfs_trans *tp,
1128 struct xfs_buf *agbp,
1132 struct xfs_mount *mp = tp->t_mountp;
1133 struct xfs_agi *agi = XFS_BUF_TO_AGI(agbp);
1134 xfs_agnumber_t agno = be32_to_cpu(agi->agi_seqno);
1135 xfs_agnumber_t pagno = XFS_INO_TO_AGNO(mp, parent);
1136 xfs_agino_t pagino = XFS_INO_TO_AGINO(mp, parent);
1137 struct xfs_perag *pag;
1138 struct xfs_btree_cur *cur, *tcur;
1139 struct xfs_inobt_rec_incore rec, trec;
1144 int searchdistance = 10;
1146 pag = xfs_perag_get(mp, agno);
1148 ASSERT(pag->pagi_init);
1149 ASSERT(pag->pagi_inodeok);
1150 ASSERT(pag->pagi_freecount > 0);
1153 cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO);
1155 * If pagino is 0 (this is the root inode allocation) use newino.
1156 * This must work because we've just allocated some.
1159 pagino = be32_to_cpu(agi->agi_newino);
1161 error = xfs_check_agi_freecount(cur, agi);
1166 * If in the same AG as the parent, try to get near the parent.
1168 if (pagno == agno) {
1169 int doneleft; /* done, to the left */
1170 int doneright; /* done, to the right */
1172 error = xfs_inobt_lookup(cur, pagino, XFS_LOOKUP_LE, &i);
1175 if (XFS_IS_CORRUPT(mp, i != 1)) {
1176 error = -EFSCORRUPTED;
1180 error = xfs_inobt_get_rec(cur, &rec, &j);
1183 if (XFS_IS_CORRUPT(mp, j != 1)) {
1184 error = -EFSCORRUPTED;
1188 if (rec.ir_freecount > 0) {
1190 * Found a free inode in the same chunk
1191 * as the parent, done.
1198 * In the same AG as parent, but parent's chunk is full.
1201 /* duplicate the cursor, search left & right simultaneously */
1202 error = xfs_btree_dup_cursor(cur, &tcur);
1207 * Skip to last blocks looked up if same parent inode.
1209 if (pagino != NULLAGINO &&
1210 pag->pagl_pagino == pagino &&
1211 pag->pagl_leftrec != NULLAGINO &&
1212 pag->pagl_rightrec != NULLAGINO) {
1213 error = xfs_ialloc_get_rec(tcur, pag->pagl_leftrec,
1218 error = xfs_ialloc_get_rec(cur, pag->pagl_rightrec,
1223 /* search left with tcur, back up 1 record */
1224 error = xfs_ialloc_next_rec(tcur, &trec, &doneleft, 1);
1228 /* search right with cur, go forward 1 record. */
1229 error = xfs_ialloc_next_rec(cur, &rec, &doneright, 0);
1235 * Loop until we find an inode chunk with a free inode.
1237 while (--searchdistance > 0 && (!doneleft || !doneright)) {
1238 int useleft; /* using left inode chunk this time */
1240 /* figure out the closer block if both are valid. */
1241 if (!doneleft && !doneright) {
1243 (trec.ir_startino + XFS_INODES_PER_CHUNK - 1) <
1244 rec.ir_startino - pagino;
1246 useleft = !doneleft;
1249 /* free inodes to the left? */
1250 if (useleft && trec.ir_freecount) {
1251 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
1254 pag->pagl_leftrec = trec.ir_startino;
1255 pag->pagl_rightrec = rec.ir_startino;
1256 pag->pagl_pagino = pagino;
1261 /* free inodes to the right? */
1262 if (!useleft && rec.ir_freecount) {
1263 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
1265 pag->pagl_leftrec = trec.ir_startino;
1266 pag->pagl_rightrec = rec.ir_startino;
1267 pag->pagl_pagino = pagino;
1271 /* get next record to check */
1273 error = xfs_ialloc_next_rec(tcur, &trec,
1276 error = xfs_ialloc_next_rec(cur, &rec,
1283 if (searchdistance <= 0) {
1285 * Not in range - save last search
1286 * location and allocate a new inode
1288 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
1289 pag->pagl_leftrec = trec.ir_startino;
1290 pag->pagl_rightrec = rec.ir_startino;
1291 pag->pagl_pagino = pagino;
1295 * We've reached the end of the btree. Because
1296 * we only search a small chunk of the btree
1297 * each time, there are obviously free inodes
1298 * closer to the parent inode than we are now.
1299 * Restart the search.
1301 pag->pagl_pagino = NULLAGINO;
1302 pag->pagl_leftrec = NULLAGINO;
1303 pag->pagl_rightrec = NULLAGINO;
1304 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
1305 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
1311 * In a different AG from the parent.
1312 * See if the most recently allocated block has any free.
1314 if (agi->agi_newino != cpu_to_be32(NULLAGINO)) {
1315 error = xfs_inobt_lookup(cur, be32_to_cpu(agi->agi_newino),
1321 error = xfs_inobt_get_rec(cur, &rec, &j);
1325 if (j == 1 && rec.ir_freecount > 0) {
1327 * The last chunk allocated in the group
1328 * still has a free inode.
1336 * None left in the last group, search the whole AG
1338 error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);
1341 if (XFS_IS_CORRUPT(mp, i != 1)) {
1342 error = -EFSCORRUPTED;
1347 error = xfs_inobt_get_rec(cur, &rec, &i);
1350 if (XFS_IS_CORRUPT(mp, i != 1)) {
1351 error = -EFSCORRUPTED;
1354 if (rec.ir_freecount > 0)
1356 error = xfs_btree_increment(cur, 0, &i);
1359 if (XFS_IS_CORRUPT(mp, i != 1)) {
1360 error = -EFSCORRUPTED;
1366 offset = xfs_inobt_first_free_inode(&rec);
1367 ASSERT(offset >= 0);
1368 ASSERT(offset < XFS_INODES_PER_CHUNK);
1369 ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) %
1370 XFS_INODES_PER_CHUNK) == 0);
1371 ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino + offset);
1372 rec.ir_free &= ~XFS_INOBT_MASK(offset);
1374 error = xfs_inobt_update(cur, &rec);
1377 be32_add_cpu(&agi->agi_freecount, -1);
1378 xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
1379 pag->pagi_freecount--;
1381 error = xfs_check_agi_freecount(cur, agi);
1385 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
1386 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1);
1391 xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
1393 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
1399 * Use the free inode btree to allocate an inode based on distance from the
1400 * parent. Note that the provided cursor may be deleted and replaced.
1403 xfs_dialloc_ag_finobt_near(
1405 struct xfs_btree_cur **ocur,
1406 struct xfs_inobt_rec_incore *rec)
1408 struct xfs_btree_cur *lcur = *ocur; /* left search cursor */
1409 struct xfs_btree_cur *rcur; /* right search cursor */
1410 struct xfs_inobt_rec_incore rrec;
1414 error = xfs_inobt_lookup(lcur, pagino, XFS_LOOKUP_LE, &i);
1419 error = xfs_inobt_get_rec(lcur, rec, &i);
1422 if (XFS_IS_CORRUPT(lcur->bc_mp, i != 1))
1423 return -EFSCORRUPTED;
1426 * See if we've landed in the parent inode record. The finobt
1427 * only tracks chunks with at least one free inode, so record
1428 * existence is enough.
1430 if (pagino >= rec->ir_startino &&
1431 pagino < (rec->ir_startino + XFS_INODES_PER_CHUNK))
1435 error = xfs_btree_dup_cursor(lcur, &rcur);
1439 error = xfs_inobt_lookup(rcur, pagino, XFS_LOOKUP_GE, &j);
1443 error = xfs_inobt_get_rec(rcur, &rrec, &j);
1446 if (XFS_IS_CORRUPT(lcur->bc_mp, j != 1)) {
1447 error = -EFSCORRUPTED;
1452 if (XFS_IS_CORRUPT(lcur->bc_mp, i != 1 && j != 1)) {
1453 error = -EFSCORRUPTED;
1456 if (i == 1 && j == 1) {
1458 * Both the left and right records are valid. Choose the closer
1459 * inode chunk to the target.
1461 if ((pagino - rec->ir_startino + XFS_INODES_PER_CHUNK - 1) >
1462 (rrec.ir_startino - pagino)) {
1464 xfs_btree_del_cursor(lcur, XFS_BTREE_NOERROR);
1467 xfs_btree_del_cursor(rcur, XFS_BTREE_NOERROR);
1469 } else if (j == 1) {
1470 /* only the right record is valid */
1472 xfs_btree_del_cursor(lcur, XFS_BTREE_NOERROR);
1474 } else if (i == 1) {
1475 /* only the left record is valid */
1476 xfs_btree_del_cursor(rcur, XFS_BTREE_NOERROR);
1482 xfs_btree_del_cursor(rcur, XFS_BTREE_ERROR);
1487 * Use the free inode btree to find a free inode based on a newino hint. If
1488 * the hint is NULL, find the first free inode in the AG.
1491 xfs_dialloc_ag_finobt_newino(
1492 struct xfs_agi *agi,
1493 struct xfs_btree_cur *cur,
1494 struct xfs_inobt_rec_incore *rec)
1499 if (agi->agi_newino != cpu_to_be32(NULLAGINO)) {
1500 error = xfs_inobt_lookup(cur, be32_to_cpu(agi->agi_newino),
1505 error = xfs_inobt_get_rec(cur, rec, &i);
1508 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
1509 return -EFSCORRUPTED;
1515 * Find the first inode available in the AG.
1517 error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);
1520 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
1521 return -EFSCORRUPTED;
1523 error = xfs_inobt_get_rec(cur, rec, &i);
1526 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
1527 return -EFSCORRUPTED;
1533 * Update the inobt based on a modification made to the finobt. Also ensure that
1534 * the records from both trees are equivalent post-modification.
1537 xfs_dialloc_ag_update_inobt(
1538 struct xfs_btree_cur *cur, /* inobt cursor */
1539 struct xfs_inobt_rec_incore *frec, /* finobt record */
1540 int offset) /* inode offset */
1542 struct xfs_inobt_rec_incore rec;
1546 error = xfs_inobt_lookup(cur, frec->ir_startino, XFS_LOOKUP_EQ, &i);
1549 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
1550 return -EFSCORRUPTED;
1552 error = xfs_inobt_get_rec(cur, &rec, &i);
1555 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
1556 return -EFSCORRUPTED;
1557 ASSERT((XFS_AGINO_TO_OFFSET(cur->bc_mp, rec.ir_startino) %
1558 XFS_INODES_PER_CHUNK) == 0);
1560 rec.ir_free &= ~XFS_INOBT_MASK(offset);
1563 if (XFS_IS_CORRUPT(cur->bc_mp,
1564 rec.ir_free != frec->ir_free ||
1565 rec.ir_freecount != frec->ir_freecount))
1566 return -EFSCORRUPTED;
1568 return xfs_inobt_update(cur, &rec);
1572 * Allocate an inode using the free inode btree, if available. Otherwise, fall
1573 * back to the inobt search algorithm.
1575 * The caller selected an AG for us, and made sure that free inodes are
1580 struct xfs_trans *tp,
1581 struct xfs_buf *agbp,
1585 struct xfs_mount *mp = tp->t_mountp;
1586 struct xfs_agi *agi = XFS_BUF_TO_AGI(agbp);
1587 xfs_agnumber_t agno = be32_to_cpu(agi->agi_seqno);
1588 xfs_agnumber_t pagno = XFS_INO_TO_AGNO(mp, parent);
1589 xfs_agino_t pagino = XFS_INO_TO_AGINO(mp, parent);
1590 struct xfs_perag *pag;
1591 struct xfs_btree_cur *cur; /* finobt cursor */
1592 struct xfs_btree_cur *icur; /* inobt cursor */
1593 struct xfs_inobt_rec_incore rec;
1599 if (!xfs_sb_version_hasfinobt(&mp->m_sb))
1600 return xfs_dialloc_ag_inobt(tp, agbp, parent, inop);
1602 pag = xfs_perag_get(mp, agno);
1605 * If pagino is 0 (this is the root inode allocation) use newino.
1606 * This must work because we've just allocated some.
1609 pagino = be32_to_cpu(agi->agi_newino);
1611 cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_FINO);
1613 error = xfs_check_agi_freecount(cur, agi);
1618 * The search algorithm depends on whether we're in the same AG as the
1619 * parent. If so, find the closest available inode to the parent. If
1620 * not, consider the agi hint or find the first free inode in the AG.
1623 error = xfs_dialloc_ag_finobt_near(pagino, &cur, &rec);
1625 error = xfs_dialloc_ag_finobt_newino(agi, cur, &rec);
1629 offset = xfs_inobt_first_free_inode(&rec);
1630 ASSERT(offset >= 0);
1631 ASSERT(offset < XFS_INODES_PER_CHUNK);
1632 ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) %
1633 XFS_INODES_PER_CHUNK) == 0);
1634 ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino + offset);
1637 * Modify or remove the finobt record.
1639 rec.ir_free &= ~XFS_INOBT_MASK(offset);
1641 if (rec.ir_freecount)
1642 error = xfs_inobt_update(cur, &rec);
1644 error = xfs_btree_delete(cur, &i);
1649 * The finobt has now been updated appropriately. We haven't updated the
1650 * agi and superblock yet, so we can create an inobt cursor and validate
1651 * the original freecount. If all is well, make the equivalent update to
1652 * the inobt using the finobt record and offset information.
1654 icur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO);
1656 error = xfs_check_agi_freecount(icur, agi);
1660 error = xfs_dialloc_ag_update_inobt(icur, &rec, offset);
1665 * Both trees have now been updated. We must update the perag and
1666 * superblock before we can check the freecount for each btree.
1668 be32_add_cpu(&agi->agi_freecount, -1);
1669 xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
1670 pag->pagi_freecount--;
1672 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1);
1674 error = xfs_check_agi_freecount(icur, agi);
1677 error = xfs_check_agi_freecount(cur, agi);
1681 xfs_btree_del_cursor(icur, XFS_BTREE_NOERROR);
1682 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
1688 xfs_btree_del_cursor(icur, XFS_BTREE_ERROR);
1690 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
1696 * Allocate an inode on disk.
1698 * Mode is used to tell whether the new inode will need space, and whether it
1701 * This function is designed to be called twice if it has to do an allocation
1702 * to make more free inodes. On the first call, *IO_agbp should be set to NULL.
1703 * If an inode is available without having to perform an allocation, an inode
1704 * number is returned. In this case, *IO_agbp is set to NULL. If an allocation
1705 * needs to be done, xfs_dialloc returns the current AGI buffer in *IO_agbp.
1706 * The caller should then commit the current transaction, allocate a
1707 * new transaction, and call xfs_dialloc() again, passing in the previous value
1708 * of *IO_agbp. IO_agbp should be held across the transactions. Since the AGI
1709 * buffer is locked across the two calls, the second call is guaranteed to have
1710 * a free inode available.
1712 * Once we successfully pick an inode its number is returned and the on-disk
1713 * data structures are updated. The inode itself is not read in, since doing so
1714 * would break ordering constraints with xfs_reclaim.
1718 struct xfs_trans *tp,
1721 struct xfs_buf **IO_agbp,
1724 struct xfs_mount *mp = tp->t_mountp;
1725 struct xfs_buf *agbp;
1726 xfs_agnumber_t agno;
1730 xfs_agnumber_t start_agno;
1731 struct xfs_perag *pag;
1732 struct xfs_ino_geometry *igeo = M_IGEO(mp);
1737 * If the caller passes in a pointer to the AGI buffer,
1738 * continue where we left off before. In this case, we
1739 * know that the allocation group has free inodes.
1746 * We do not have an agbp, so select an initial allocation
1747 * group for inode allocation.
1749 start_agno = xfs_ialloc_ag_select(tp, parent, mode);
1750 if (start_agno == NULLAGNUMBER) {
1756 * If we have already hit the ceiling of inode blocks then clear
1757 * okalloc so we scan all available agi structures for a free
1760 * Read a rough value of mp->m_icount via percpu_counter_read_positive(),
1761 * which sacrifices precision but improves performance.
1763 if (igeo->maxicount &&
1764 percpu_counter_read_positive(&mp->m_icount) + igeo->ialloc_inos
1765 > igeo->maxicount) {
1771 * Loop until we find an allocation group that either has free inodes
1772 * or in which we can allocate some inodes. Iterate through the
1773 * allocation groups upward, wrapping at the end.
1777 pag = xfs_perag_get(mp, agno);
1778 if (!pag->pagi_inodeok) {
1779 xfs_ialloc_next_ag(mp);
1783 if (!pag->pagi_init) {
1784 error = xfs_ialloc_pagi_init(mp, tp, agno);
1790 * Do a first racy fast path check if this AG is usable.
1792 if (!pag->pagi_freecount && !okalloc)
1796 * Then read in the AGI buffer and recheck with the AGI buffer
1799 error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
1803 if (pag->pagi_freecount) {
1809 goto nextag_relse_buffer;
1812 error = xfs_ialloc_ag_alloc(tp, agbp, &ialloced);
1814 xfs_trans_brelse(tp, agbp);
1816 if (error != -ENOSPC)
1826 * We successfully allocated some inodes, return
1827 * the current context to the caller so that it
1828 * can commit the current transaction and call
1829 * us again where we left off.
1831 ASSERT(pag->pagi_freecount > 0);
1839 nextag_relse_buffer:
1840 xfs_trans_brelse(tp, agbp);
1843 if (++agno == mp->m_sb.sb_agcount)
1845 if (agno == start_agno) {
1847 return noroom ? -ENOSPC : 0;
1853 return xfs_dialloc_ag(tp, agbp, parent, inop);
1860 * Free the blocks of an inode chunk. We must consider that the inode chunk
1861 * might be sparse and only free the regions that are allocated as part of the
1865 xfs_difree_inode_chunk(
1866 struct xfs_trans *tp,
1867 xfs_agnumber_t agno,
1868 struct xfs_inobt_rec_incore *rec)
1870 struct xfs_mount *mp = tp->t_mountp;
1871 xfs_agblock_t sagbno = XFS_AGINO_TO_AGBNO(mp,
1873 int startidx, endidx;
1875 xfs_agblock_t agbno;
1877 DECLARE_BITMAP(holemask, XFS_INOBT_HOLEMASK_BITS);
1879 if (!xfs_inobt_issparse(rec->ir_holemask)) {
1880 /* not sparse, calculate extent info directly */
1881 xfs_bmap_add_free(tp, XFS_AGB_TO_FSB(mp, agno, sagbno),
1882 M_IGEO(mp)->ialloc_blks,
1883 &XFS_RMAP_OINFO_INODES);
1887 /* holemask is only 16-bits (fits in an unsigned long) */
1888 ASSERT(sizeof(rec->ir_holemask) <= sizeof(holemask[0]));
1889 holemask[0] = rec->ir_holemask;
1892 * Find contiguous ranges of zeroes (i.e., allocated regions) in the
1893 * holemask and convert the start/end index of each range to an extent.
1894 * We start with the start and end index both pointing at the first 0 in
1897 startidx = endidx = find_first_zero_bit(holemask,
1898 XFS_INOBT_HOLEMASK_BITS);
1899 nextbit = startidx + 1;
1900 while (startidx < XFS_INOBT_HOLEMASK_BITS) {
1901 nextbit = find_next_zero_bit(holemask, XFS_INOBT_HOLEMASK_BITS,
1904 * If the next zero bit is contiguous, update the end index of
1905 * the current range and continue.
1907 if (nextbit != XFS_INOBT_HOLEMASK_BITS &&
1908 nextbit == endidx + 1) {
1914 * nextbit is not contiguous with the current end index. Convert
1915 * the current start/end to an extent and add it to the free
1918 agbno = sagbno + (startidx * XFS_INODES_PER_HOLEMASK_BIT) /
1919 mp->m_sb.sb_inopblock;
1920 contigblk = ((endidx - startidx + 1) *
1921 XFS_INODES_PER_HOLEMASK_BIT) /
1922 mp->m_sb.sb_inopblock;
1924 ASSERT(agbno % mp->m_sb.sb_spino_align == 0);
1925 ASSERT(contigblk % mp->m_sb.sb_spino_align == 0);
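/*
 * Worked example (hypothetical geometry): with 16 inodes per block and
 * ir_holemask = 0xff00, bits 0-7 are clear, so startidx = 0 and
 * endidx = 7; the freed extent starts at sagbno and spans
 * (8 * XFS_INODES_PER_HOLEMASK_BIT) / 16 = 2 blocks.
 */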
1926 xfs_bmap_add_free(tp, XFS_AGB_TO_FSB(mp, agno, agbno),
1927 contigblk, &XFS_RMAP_OINFO_INODES);
1929 /* reset range to current bit and carry on... */
1930 startidx = endidx = nextbit;
1939 struct xfs_mount *mp,
1940 struct xfs_trans *tp,
1941 struct xfs_buf *agbp,
1943 struct xfs_icluster *xic,
1944 struct xfs_inobt_rec_incore *orec)
1946 struct xfs_agi *agi = XFS_BUF_TO_AGI(agbp);
1947 xfs_agnumber_t agno = be32_to_cpu(agi->agi_seqno);
1948 struct xfs_perag *pag;
1949 struct xfs_btree_cur *cur;
1950 struct xfs_inobt_rec_incore rec;
1956 ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC));
1957 ASSERT(XFS_AGINO_TO_AGBNO(mp, agino) < be32_to_cpu(agi->agi_length));
1960 * Initialize the cursor.
1962 cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO);
1964 error = xfs_check_agi_freecount(cur, agi);
1969 * Look for the entry describing this inode.
1971 if ((error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i))) {
1972 xfs_warn(mp, "%s: xfs_inobt_lookup() returned error %d.",
1976 if (XFS_IS_CORRUPT(mp, i != 1)) {
1977 error = -EFSCORRUPTED;
1980 error = xfs_inobt_get_rec(cur, &rec, &i);
1982 xfs_warn(mp, "%s: xfs_inobt_get_rec() returned error %d.",
1986 if (XFS_IS_CORRUPT(mp, i != 1)) {
1987 error = -EFSCORRUPTED;
1991 * Get the offset in the inode chunk.
1993 off = agino - rec.ir_startino;
1994 ASSERT(off >= 0 && off < XFS_INODES_PER_CHUNK);
1995 ASSERT(!(rec.ir_free & XFS_INOBT_MASK(off)));
1997 * Mark the inode free & increment the count.
1999 rec.ir_free |= XFS_INOBT_MASK(off);
2003 * When an inode chunk is free, it becomes eligible for removal. Don't
2004 * remove the chunk if the block size is large enough for multiple inode
2005 * chunks (that might not be free).
2007 if (!(mp->m_flags & XFS_MOUNT_IKEEP) &&
2008 rec.ir_free == XFS_INOBT_ALL_FREE &&
2009 mp->m_sb.sb_inopblock <= XFS_INODES_PER_CHUNK) {
2010 xic->deleted = true;
2011 xic->first_ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino);
2012 xic->alloc = xfs_inobt_irec_to_allocmask(&rec);
2015 * Remove the inode cluster from the AGI B+Tree, adjust the
2016 * AGI and Superblock inode counts, and mark the disk space
2017 * to be freed when the transaction is committed.
2019 ilen = rec.ir_freecount;
2020 be32_add_cpu(&agi->agi_count, -ilen);
2021 be32_add_cpu(&agi->agi_freecount, -(ilen - 1));
2022 xfs_ialloc_log_agi(tp, agbp, XFS_AGI_COUNT | XFS_AGI_FREECOUNT);
2023 pag = xfs_perag_get(mp, agno);
2024 pag->pagi_freecount -= ilen - 1;
2025 pag->pagi_count -= ilen;
2027 xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, -ilen);
2028 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -(ilen - 1));
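/*
 * Note that ilen includes the inode freed just above, but the free
 * counts only drop by ilen - 1: that inode had not yet been added to
 * agi_freecount, so only the record's previously free inodes are
 * subtracted.
 */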
2030 if ((error = xfs_btree_delete(cur, &i))) {
2031 xfs_warn(mp, "%s: xfs_btree_delete returned error %d.",
2036 xfs_difree_inode_chunk(tp, agno, &rec);
2038 xic->deleted = false;
2040 error = xfs_inobt_update(cur, &rec);
2042 xfs_warn(mp, "%s: xfs_inobt_update returned error %d.",
2048 * Change the inode free counts and log the ag/sb changes.
2050 be32_add_cpu(&agi->agi_freecount, 1);
2051 xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
2052 pag = xfs_perag_get(mp, agno);
2053 pag->pagi_freecount++;
2055 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, 1);
2058 error = xfs_check_agi_freecount(cur, agi);
2063 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
2067 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
2072 * Free an inode in the free inode btree.
2076 struct xfs_mount *mp,
2077 struct xfs_trans *tp,
2078 struct xfs_buf *agbp,
2080 struct xfs_inobt_rec_incore *ibtrec) /* inobt record */
2082 struct xfs_agi *agi = XFS_BUF_TO_AGI(agbp);
2083 xfs_agnumber_t agno = be32_to_cpu(agi->agi_seqno);
2084 struct xfs_btree_cur *cur;
2085 struct xfs_inobt_rec_incore rec;
2086 int offset = agino - ibtrec->ir_startino;
2090 cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_FINO);
2092 error = xfs_inobt_lookup(cur, ibtrec->ir_startino, XFS_LOOKUP_EQ, &i);
2097 * If the record does not exist in the finobt, we must have just
2098 * freed an inode in a previously fully allocated chunk. If not,
2099 * something is out of sync.
2101 if (XFS_IS_CORRUPT(mp, ibtrec->ir_freecount != 1)) {
2102 error = -EFSCORRUPTED;
2106 error = xfs_inobt_insert_rec(cur, ibtrec->ir_holemask,
2108 ibtrec->ir_freecount,
2109 ibtrec->ir_free, &i);
2118 * Read and update the existing record. We could just copy the ibtrec
2119 * across here, but that would defeat the purpose of having redundant
2120 * metadata. By making the modifications independently, we can catch
2121 * corruptions that we wouldn't see if we just copied from one record
2124 error = xfs_inobt_get_rec(cur, &rec, &i);
2127 if (XFS_IS_CORRUPT(mp, i != 1)) {
2128 error = -EFSCORRUPTED;
2132 rec.ir_free |= XFS_INOBT_MASK(offset);
2135 if (XFS_IS_CORRUPT(mp,
2136 rec.ir_free != ibtrec->ir_free ||
2137 rec.ir_freecount != ibtrec->ir_freecount)) {
2138 error = -EFSCORRUPTED;
2143 * The content of inobt records should always match between the inobt
2144 * and finobt. The lifecycle of records in the finobt is different from
2145 * the inobt in that the finobt only tracks records with at least one
2146 * free inode. Hence, if all of the inodes are free and we aren't
2147 * keeping inode chunks permanently on disk, remove the record.
2148 * Otherwise, update the record with the new information.
2150 * Note that we currently can't free chunks when the block size is large
2151 * enough for multiple chunks. Leave the finobt record to remain in sync
2154 if (rec.ir_free == XFS_INOBT_ALL_FREE &&
2155 mp->m_sb.sb_inopblock <= XFS_INODES_PER_CHUNK &&
2156 !(mp->m_flags & XFS_MOUNT_IKEEP)) {
2157 error = xfs_btree_delete(cur, &i);
2162 error = xfs_inobt_update(cur, &rec);
2168 error = xfs_check_agi_freecount(cur, agi);
2172 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
2176 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
2181 * Free disk inode. Carefully avoids touching the incore inode; all
2182 * incore manipulations are the caller's responsibility.
2183 * The on-disk inode is not changed by this operation, only the
2184 * btree (free inode mask) is changed.
2188 struct xfs_trans *tp, /* transaction pointer */
2189 xfs_ino_t inode, /* inode to be freed */
2190 struct xfs_icluster *xic) /* cluster info if deleted */
2193 xfs_agblock_t agbno; /* block number containing inode */
2194 struct xfs_buf *agbp; /* buffer for allocation group header */
2195 xfs_agino_t agino; /* allocation group inode number */
2196 xfs_agnumber_t agno; /* allocation group number */
2197 int error; /* error return value */
2198 struct xfs_mount *mp; /* mount structure for filesystem */
2199 struct xfs_inobt_rec_incore rec;/* btree record */
2204 * Break up inode number into its components.
2206 agno = XFS_INO_TO_AGNO(mp, inode);
2207 if (agno >= mp->m_sb.sb_agcount) {
2208 xfs_warn(mp, "%s: agno >= mp->m_sb.sb_agcount (%d >= %d).",
2209 __func__, agno, mp->m_sb.sb_agcount);
2213 agino = XFS_INO_TO_AGINO(mp, inode);
2214 if (inode != XFS_AGINO_TO_INO(mp, agno, agino)) {
2215 xfs_warn(mp, "%s: inode != XFS_AGINO_TO_INO() (%llu != %llu).",
2216 __func__, (unsigned long long)inode,
2217 (unsigned long long)XFS_AGINO_TO_INO(mp, agno, agino));
2221 agbno = XFS_AGINO_TO_AGBNO(mp, agino);
2222 if (agbno >= mp->m_sb.sb_agblocks) {
2223 xfs_warn(mp, "%s: agbno >= mp->m_sb.sb_agblocks (%d >= %d).",
2224 __func__, agbno, mp->m_sb.sb_agblocks);
2229 * Get the allocation group header.
2231 error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
2233 xfs_warn(mp, "%s: xfs_ialloc_read_agi() returned error %d.",
2239 * Fix up the inode allocation btree.
2241 error = xfs_difree_inobt(mp, tp, agbp, agino, xic, &rec);
2246 * Fix up the free inode btree.
2248 if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
2249 error = xfs_difree_finobt(mp, tp, agbp, agino, &rec);
2262 struct xfs_mount *mp,
2263 struct xfs_trans *tp,
2264 xfs_agnumber_t agno,
2266 xfs_agblock_t agbno,
2267 xfs_agblock_t *chunk_agbno,
2268 xfs_agblock_t *offset_agbno,
2271 struct xfs_inobt_rec_incore rec;
2272 struct xfs_btree_cur *cur;
2273 struct xfs_buf *agbp;
2277 error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
2280 "%s: xfs_ialloc_read_agi() returned error %d, agno %d",
2281 __func__, error, agno);
2286 * Lookup the inode record for the given agino. If the record cannot be
2287 * found, then it's an invalid inode number and we should abort. Once
2288 * we have a record, we need to ensure it contains the inode number
2289 * we are looking up.
2291 cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO);
2292 error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i);
2295 error = xfs_inobt_get_rec(cur, &rec, &i);
2296 if (!error && i == 0)
2300 xfs_trans_brelse(tp, agbp);
2301 xfs_btree_del_cursor(cur, error);
2305 /* check that the returned record contains the required inode */
2306 if (rec.ir_startino > agino ||
2307 rec.ir_startino + M_IGEO(mp)->ialloc_inos <= agino)
2310 /* for untrusted inodes check it is allocated first */
2311 if ((flags & XFS_IGET_UNTRUSTED) &&
2312 (rec.ir_free & XFS_INOBT_MASK(agino - rec.ir_startino)))
2315 *chunk_agbno = XFS_AGINO_TO_AGBNO(mp, rec.ir_startino);
2316 *offset_agbno = agbno - *chunk_agbno;
2321 * Return the location of the inode in imap, for mapping it into a buffer.
2325 xfs_mount_t *mp, /* file system mount structure */
2326 xfs_trans_t *tp, /* transaction pointer */
2327 xfs_ino_t ino, /* inode to locate */
2328 struct xfs_imap *imap, /* location map structure */
2329 uint flags) /* flags for inode btree lookup */
2331 xfs_agblock_t agbno; /* block number of inode in the alloc group */
2332 xfs_agino_t agino; /* inode number within alloc group */
2333 xfs_agnumber_t agno; /* allocation group number */
2334 xfs_agblock_t chunk_agbno; /* first block in inode chunk */
2335 xfs_agblock_t cluster_agbno; /* first block in inode cluster */
2336 int error; /* error code */
2337 int offset; /* index of inode in its buffer */
2338 xfs_agblock_t offset_agbno; /* blks from chunk start to inode */
2340 ASSERT(ino != NULLFSINO);
2343 * Split up the inode number into its parts.
2345 agno = XFS_INO_TO_AGNO(mp, ino);
2346 agino = XFS_INO_TO_AGINO(mp, ino);
2347 agbno = XFS_AGINO_TO_AGBNO(mp, agino);
2348 if (agno >= mp->m_sb.sb_agcount || agbno >= mp->m_sb.sb_agblocks ||
2349 ino != XFS_AGINO_TO_INO(mp, agno, agino)) {
2352 * Don't output diagnostic information for untrusted inodes
2353 * as they can be invalid without implying corruption.
2355 if (flags & XFS_IGET_UNTRUSTED)
2357 if (agno >= mp->m_sb.sb_agcount) {
2359 "%s: agno (%d) >= mp->m_sb.sb_agcount (%d)",
2360 __func__, agno, mp->m_sb.sb_agcount);
2362 if (agbno >= mp->m_sb.sb_agblocks) {
2364 "%s: agbno (0x%llx) >= mp->m_sb.sb_agblocks (0x%lx)",
2365 __func__, (unsigned long long)agbno,
2366 (unsigned long)mp->m_sb.sb_agblocks);
2368 if (ino != XFS_AGINO_TO_INO(mp, agno, agino)) {
2370 "%s: ino (0x%llx) != XFS_AGINO_TO_INO() (0x%llx)",
2372 XFS_AGINO_TO_INO(mp, agno, agino));
2380 * For bulkstat and handle lookups, we have an untrusted inode number
2381 * that we have to verify is valid. We cannot do this just by reading
2382 * the inode buffer as it may have been unlinked and removed leaving
2383 * inodes in stale state on disk. Hence we have to do a btree lookup
2384 * in all cases where an untrusted inode number is passed.
2386 if (flags & XFS_IGET_UNTRUSTED) {
2387 error = xfs_imap_lookup(mp, tp, agno, agino, agbno,
2388 &chunk_agbno, &offset_agbno, flags);
2395 * If the inode cluster size is the same as the blocksize or
2396 * smaller we get to the buffer by simple arithmetic.
2398 if (M_IGEO(mp)->blocks_per_cluster == 1) {
2399 offset = XFS_INO_TO_OFFSET(mp, ino);
2400 ASSERT(offset < mp->m_sb.sb_inopblock);
2402 imap->im_blkno = XFS_AGB_TO_DADDR(mp, agno, agbno);
2403 imap->im_len = XFS_FSB_TO_BB(mp, 1);
2404 imap->im_boffset = (unsigned short)(offset <<
2405 mp->m_sb.sb_inodelog);
	return 0;
}
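/*
 * A hedged sketch of the single-block case above, again with illustrative
 * values (4096-byte blocks, 512-byte inodes, so sb_inopblock = 8 and
 * sb_inodelog = 9):
 *
 *   offset           = XFS_INO_TO_OFFSET(mp, ino)  e.g. 5
 *   imap->im_len     = XFS_FSB_TO_BB(mp, 1)        = 8 sectors
 *   imap->im_boffset = 5 << 9                      = 2560 bytes
 *
 * i.e. the inode is the sixth 512-byte slot within the one-block buffer.
 */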
2410 * If the inode chunks are aligned then use simple arithmetic to
2411 * find the location. Otherwise we have to do a btree lookup.
2414 if (M_IGEO(mp)->inoalign_mask) {
2415 offset_agbno = agbno & M_IGEO(mp)->inoalign_mask;
2416 chunk_agbno = agbno - offset_agbno;
2418 error = xfs_imap_lookup(mp, tp, agno, agino, agbno,
2419 &chunk_agbno, &offset_agbno, flags);
2425 ASSERT(agbno >= chunk_agbno);
2426 cluster_agbno = chunk_agbno +
2427 ((offset_agbno / M_IGEO(mp)->blocks_per_cluster) *
2428 M_IGEO(mp)->blocks_per_cluster);
2429 offset = ((agbno - cluster_agbno) * mp->m_sb.sb_inopblock) +
2430 XFS_INO_TO_OFFSET(mp, ino);
2432 imap->im_blkno = XFS_AGB_TO_DADDR(mp, agno, cluster_agbno);
2433 imap->im_len = XFS_FSB_TO_BB(mp, M_IGEO(mp)->blocks_per_cluster);
2434 imap->im_boffset = (unsigned short)(offset << mp->m_sb.sb_inodelog);
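/*
 * A worked example of the multi-block cluster case, with illustrative
 * values (blocks_per_cluster = 4, sb_inopblock = 8, sb_inodelog = 9,
 * chunk_agbno = 16, agbno = 22, XFS_INO_TO_OFFSET() = 3):
 *
 *   offset_agbno  = 22 - 16               = 6
 *   cluster_agbno = 16 + (6 / 4) * 4      = 20
 *   offset        = (22 - 20) * 8 + 3     = 19
 *   im_len        = XFS_FSB_TO_BB(mp, 4)  = 32 sectors
 *   im_boffset    = 19 << 9               = 9728 bytes into the cluster
 */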
2437 * If the inode number maps to a block outside the bounds
2438 * of the file system then return an error rather than calling
2439 * read_buf and panicking when we get an error from the driver.
2442 if ((imap->im_blkno + imap->im_len) >
2443 XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) {
2445 "%s: (im_blkno (0x%llx) + im_len (0x%llx)) > sb_dblocks (0x%llx)",
2446 __func__, (unsigned long long) imap->im_blkno,
2447 (unsigned long long) imap->im_len,
2448 XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks));
2455 * Log specified fields for the ag hdr (inode section). The growth of the agi
2456 * structure over time requires that we interpret the buffer as two logical
2457 * regions delineated by the end of the unlinked list. This is due to the size
2458 * of the hash table and its location in the middle of the agi.
2460 * For example, a request to log a field before agi_unlinked and a field after
2461 * agi_unlinked could cause us to log the entire hash table and use an excessive
2462 * amount of log space. To avoid this behavior, log the region up through
2463 * agi_unlinked in one call and the region after agi_unlinked through the end of
2464 * the structure in another.
2468 xfs_trans_t *tp, /* transaction pointer */
2469 xfs_buf_t *bp, /* allocation group header buffer */
2470 int fields) /* bitmask of fields to log */
2472 int first; /* first byte number */
2473 int last; /* last byte number */
2474 static const short offsets[] = { /* field starting offsets */
2475 /* keep in sync with bit definitions */
2476 offsetof(xfs_agi_t, agi_magicnum),
2477 offsetof(xfs_agi_t, agi_versionnum),
2478 offsetof(xfs_agi_t, agi_seqno),
2479 offsetof(xfs_agi_t, agi_length),
2480 offsetof(xfs_agi_t, agi_count),
2481 offsetof(xfs_agi_t, agi_root),
2482 offsetof(xfs_agi_t, agi_level),
2483 offsetof(xfs_agi_t, agi_freecount),
2484 offsetof(xfs_agi_t, agi_newino),
2485 offsetof(xfs_agi_t, agi_dirino),
2486 offsetof(xfs_agi_t, agi_unlinked),
2487 offsetof(xfs_agi_t, agi_free_root),
2488 offsetof(xfs_agi_t, agi_free_level),
2492 xfs_agi_t *agi; /* allocation group header */
2494 agi = XFS_BUF_TO_AGI(bp);
2495 ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC));
2499 * Compute byte offsets for the first and last fields in the first
2500 * region and log the agi buffer. This only logs up through
	 * agi_unlinked.
2503 if (fields & XFS_AGI_ALL_BITS_R1) {
2504 xfs_btree_offsets(fields, offsets, XFS_AGI_NUM_BITS_R1,
		&first, &last);
2506 xfs_trans_log_buf(tp, bp, first, last);
	}
2510 * Mask off the bits in the first region and calculate the first and
2511 * last field offsets for any bits in the second region.
2513 fields &= ~XFS_AGI_ALL_BITS_R1;
	if (fields) {
2515 xfs_btree_offsets(fields, offsets, XFS_AGI_NUM_BITS_R2,
			&first, &last);
2517 xfs_trans_log_buf(tp, bp, first, last);
	}
}
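/*
 * A minimal, hypothetical usage sketch (not a call site from this file):
 * logging one field from each region makes xfs_ialloc_log_agi() emit two
 * xfs_trans_log_buf() ranges, skipping the unlinked hash table that sits
 * between agi_unlinked and agi_free_root.
 */
static inline void
example_log_agi_counters(
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp)
{
	/* XFS_AGI_FREECOUNT is in region 1, XFS_AGI_FREE_ROOT in region 2 */
	xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT | XFS_AGI_FREE_ROOT);
}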
2521 static xfs_failaddr_t
2525 struct xfs_mount *mp = bp->b_mount;
2526 struct xfs_agi *agi = XFS_BUF_TO_AGI(bp);
2529 if (xfs_sb_version_hascrc(&mp->m_sb)) {
2530 if (!uuid_equal(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid))
2531 return __this_address;
2532 if (!xfs_log_check_lsn(mp,
2533 be64_to_cpu(XFS_BUF_TO_AGI(bp)->agi_lsn)))
2534 return __this_address;
2538 * Validate the magic number of the agi block.
2540 if (!xfs_verify_magic(bp, agi->agi_magicnum))
2541 return __this_address;
2542 if (!XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum)))
2543 return __this_address;
2545 if (be32_to_cpu(agi->agi_level) < 1 ||
2546 be32_to_cpu(agi->agi_level) > XFS_BTREE_MAXLEVELS)
2547 return __this_address;
2549 if (xfs_sb_version_hasfinobt(&mp->m_sb) &&
2550 (be32_to_cpu(agi->agi_free_level) < 1 ||
2551 be32_to_cpu(agi->agi_free_level) > XFS_BTREE_MAXLEVELS))
2552 return __this_address;
2555 * During growfs operations the perag is not fully initialised,
2556 * so we can't use it for any useful checking. growfs ensures we can't
2557 * use it by using uncached buffers that don't have the perag attached,
2558 * so we can detect and avoid this problem.
2560 if (bp->b_pag && be32_to_cpu(agi->agi_seqno) != bp->b_pag->pag_agno)
2561 return __this_address;
2563 for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
2564 if (agi->agi_unlinked[i] == cpu_to_be32(NULLAGINO))
	continue;
2566 if (!xfs_verify_ino(mp, be32_to_cpu(agi->agi_unlinked[i])))
2567 return __this_address;
2574 xfs_agi_read_verify(
2577 struct xfs_mount *mp = bp->b_mount;
2580 if (xfs_sb_version_hascrc(&mp->m_sb) &&
2581 !xfs_buf_verify_cksum(bp, XFS_AGI_CRC_OFF))
2582 xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
2584 fa = xfs_agi_verify(bp);
2585 if (XFS_TEST_ERROR(fa, mp, XFS_ERRTAG_IALLOC_READ_AGI))
2586 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}
2591 xfs_agi_write_verify(
2594 struct xfs_mount *mp = bp->b_mount;
2595 struct xfs_buf_log_item *bip = bp->b_log_item;
2598 fa = xfs_agi_verify(bp);
	if (fa) {
2600 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}
2604 if (!xfs_sb_version_hascrc(&mp->m_sb))
	return;
2608 XFS_BUF_TO_AGI(bp)->agi_lsn = cpu_to_be64(bip->bli_item.li_lsn);
2609 xfs_buf_update_cksum(bp, XFS_AGI_CRC_OFF);
2612 const struct xfs_buf_ops xfs_agi_buf_ops = {
2614 .magic = { cpu_to_be32(XFS_AGI_MAGIC), cpu_to_be32(XFS_AGI_MAGIC) },
2615 .verify_read = xfs_agi_read_verify,
2616 .verify_write = xfs_agi_write_verify,
2617 .verify_struct = xfs_agi_verify,
2621 * Read in the allocation group header (inode allocation section)
2625 struct xfs_mount *mp, /* file system mount structure */
2626 struct xfs_trans *tp, /* transaction pointer */
2627 xfs_agnumber_t agno, /* allocation group number */
2628 struct xfs_buf **bpp) /* allocation group hdr buf */
2632 trace_xfs_read_agi(mp, agno);
2634 ASSERT(agno != NULLAGNUMBER);
2635 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
2636 XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
2637 XFS_FSS_TO_BB(mp, 1), 0, bpp, &xfs_agi_buf_ops);
2641 xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_AGI_BUF);
2643 xfs_buf_set_ref(*bpp, XFS_AGI_REF);
2648 xfs_ialloc_read_agi(
2649 struct xfs_mount *mp, /* file system mount structure */
2650 struct xfs_trans *tp, /* transaction pointer */
2651 xfs_agnumber_t agno, /* allocation group number */
2652 struct xfs_buf **bpp) /* allocation group hdr buf */
2654 struct xfs_agi *agi; /* allocation group header */
2655 struct xfs_perag *pag; /* per allocation group data */
2658 trace_xfs_ialloc_read_agi(mp, agno);
2660 error = xfs_read_agi(mp, tp, agno, bpp);
2664 agi = XFS_BUF_TO_AGI(*bpp);
2665 pag = xfs_perag_get(mp, agno);
2666 if (!pag->pagi_init) {
2667 pag->pagi_freecount = be32_to_cpu(agi->agi_freecount);
2668 pag->pagi_count = be32_to_cpu(agi->agi_count);
	pag->pagi_init = 1;
}
2673 * It's possible for these to be out of sync if
2674 * we are in the middle of a forced shutdown.
2676 ASSERT(pag->pagi_freecount == be32_to_cpu(agi->agi_freecount) ||
2677 XFS_FORCED_SHUTDOWN(mp));
2683 * Read in the agi to initialise the per-ag data in the mount structure
2686 xfs_ialloc_pagi_init(
2687 xfs_mount_t *mp, /* file system mount structure */
2688 xfs_trans_t *tp, /* transaction pointer */
2689 xfs_agnumber_t agno) /* allocation group number */
2691 xfs_buf_t *bp = NULL;
2694 error = xfs_ialloc_read_agi(mp, tp, agno, &bp);
2698 xfs_trans_brelse(tp, bp);
2702 /* Is there an inode record covering a given range of inode numbers? */
2704 xfs_ialloc_has_inode_record(
2705 struct xfs_btree_cur *cur,
2710 struct xfs_inobt_rec_incore irec;
2718 error = xfs_inobt_lookup(cur, low, XFS_LOOKUP_LE, &has_record);
2719 while (error == 0 && has_record) {
2720 error = xfs_inobt_get_rec(cur, &irec, &has_record);
2721 if (error || irec.ir_startino > high)
	break;
2724 agino = irec.ir_startino;
2725 holemask = irec.ir_holemask;
2726 for (i = 0; i < XFS_INOBT_HOLEMASK_BITS; holemask >>= 1,
2727 i++, agino += XFS_INODES_PER_HOLEMASK_BIT) {
	if (holemask & 1)
		continue;
2730 if (agino + XFS_INODES_PER_HOLEMASK_BIT > low &&
	    agino <= high) {
		*exists = true;
		return 0;
	}
}
2737 error = xfs_btree_increment(cur, 0, &has_record);
2742 /* Is there an inode record covering a given extent? */
2744 xfs_ialloc_has_inodes_at_extent(
2745 struct xfs_btree_cur *cur,
2753 low = XFS_AGB_TO_AGINO(cur->bc_mp, bno);
2754 high = XFS_AGB_TO_AGINO(cur->bc_mp, bno + len) - 1;
2756 return xfs_ialloc_has_inode_record(cur, low, high, exists);
}
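/*
 * Illustrative numbers only: with 4096-byte blocks and 512-byte inodes,
 * XFS_AGB_TO_AGINO() is a shift by sb_inopblog = 3, so a query for blocks
 * [100, 102) becomes the inode range
 *
 *   low  = 100 << 3       = 800
 *   high = (102 << 3) - 1 = 815
 *
 * xfs_ialloc_has_inode_record() then walks the records overlapping that
 * range, skipping holemask regions (each holemask bit covers
 * XFS_INODES_PER_HOLEMASK_BIT = 4 inodes of a 64-inode chunk).
 */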
2759 struct xfs_ialloc_count_inodes {
	xfs_agino_t count;
2761 xfs_agino_t freecount;
};
2764 /* Record inode counts across all inobt records. */
2766 xfs_ialloc_count_inodes_rec(
2767 struct xfs_btree_cur *cur,
2768 union xfs_btree_rec *rec,
	void *priv)
{
2771 struct xfs_inobt_rec_incore irec;
2772 struct xfs_ialloc_count_inodes *ci = priv;
2774 xfs_inobt_btrec_to_irec(cur->bc_mp, rec, &irec);
2775 ci->count += irec.ir_count;
2776 ci->freecount += irec.ir_freecount;
2781 /* Count allocated and free inodes under an inobt. */
2783 xfs_ialloc_count_inodes(
2784 struct xfs_btree_cur *cur,
	xfs_agino_t *count,
2786 xfs_agino_t *freecount)
2788 struct xfs_ialloc_count_inodes ci = {0};
2791 ASSERT(cur->bc_btnum == XFS_BTNUM_INO);
2792 error = xfs_btree_query_all(cur, xfs_ialloc_count_inodes_rec, &ci);
	*count = ci.count;
2797 *freecount = ci.freecount;
	return 0;
}
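/*
 * A hedged usage sketch, not a call site from this file: a checker could
 * sum the inobt records and compare the totals against the counters kept
 * in the AGI header.  example_check_agi_counters() is hypothetical.
 */
static int
example_check_agi_counters(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	xfs_agnumber_t		agno)
{
	struct xfs_agi		*agi = XFS_BUF_TO_AGI(agbp);
	struct xfs_btree_cur	*cur;
	xfs_agino_t		count = 0;
	xfs_agino_t		freecount = 0;
	int			error;

	cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO);
	error = xfs_ialloc_count_inodes(cur, &count, &freecount);
	xfs_btree_del_cursor(cur, error);
	if (error)
		return error;

	/* The AGI counters should match the per-record totals. */
	if (count != be32_to_cpu(agi->agi_count) ||
	    freecount != be32_to_cpu(agi->agi_freecount))
		return -EFSCORRUPTED;
	return 0;
}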
2802 * Initialize inode-related geometry information.
2804 * Compute the inode btree min and max levels and set maxicount.
2806 * Set the inode cluster size. This may still be overridden by the file
2807 * system block size if it is larger than the chosen cluster size.
2809 * For v5 filesystems, scale the cluster size with the inode size to keep a
2810 * constant ratio of inodes per cluster buffer, but only if mkfs has set the
2811 * inode alignment value appropriately for larger cluster sizes.
2813 * Then compute the inode cluster alignment information.
2816 xfs_ialloc_setup_geometry(
2817 struct xfs_mount *mp)
2819 struct xfs_sb *sbp = &mp->m_sb;
2820 struct xfs_ino_geometry *igeo = M_IGEO(mp);
	uint64_t icount;
	uint inodes;
2824 /* Compute inode btree geometry. */
2825 igeo->agino_log = sbp->sb_inopblog + sbp->sb_agblklog;
2826 igeo->inobt_mxr[0] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 1);
2827 igeo->inobt_mxr[1] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 0);
2828 igeo->inobt_mnr[0] = igeo->inobt_mxr[0] / 2;
2829 igeo->inobt_mnr[1] = igeo->inobt_mxr[1] / 2;
2831 igeo->ialloc_inos = max_t(uint16_t, XFS_INODES_PER_CHUNK,
	sbp->sb_inopblock);
2833 igeo->ialloc_blks = igeo->ialloc_inos >> sbp->sb_inopblog;
2835 if (sbp->sb_spino_align)
2836 igeo->ialloc_min_blks = sbp->sb_spino_align;
2838 igeo->ialloc_min_blks = igeo->ialloc_blks;
2840 /* Compute and fill in value of m_ino_geo.inobt_maxlevels. */
2841 inodes = (1LL << XFS_INO_AGINO_BITS(mp)) >> XFS_INODES_PER_CHUNK_LOG;
2842 igeo->inobt_maxlevels = xfs_btree_compute_maxlevels(igeo->inobt_mnr,
	inodes);
2846 * Set the maximum inode count for this filesystem, being careful not
2847 * to use obviously garbage sb_inopblog/sb_inopblock values. Regular
2848 * users should never get here due to failing sb verification, but
2849 * certain users (xfs_db) need to be usable even with corrupt metadata.
2851 if (sbp->sb_imax_pct && igeo->ialloc_blks) {
2853 * Make sure the maximum inode count is a multiple
2854 * of the units we allocate inodes in.
2856 icount = sbp->sb_dblocks * sbp->sb_imax_pct;
2857 do_div(icount, 100);
2858 do_div(icount, igeo->ialloc_blks);
2859 igeo->maxicount = XFS_FSB_TO_INO(mp,
2860 icount * igeo->ialloc_blks);
	} else {
2862 igeo->maxicount = 0;
	}
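/*
 * A worked example with illustrative values: sb_dblocks = 1,000,000,
 * sb_imax_pct = 25 and ialloc_blks = 8 (64 inodes of 512 bytes in
 * 4096-byte blocks):
 *
 *   icount    = 1000000 * 25 / 100            = 250000 blocks
 *   icount   /= ialloc_blks                   = 31250 chunks
 *   maxicount = XFS_FSB_TO_INO(mp, 31250 * 8) = 2000000 inodes
 *
 * i.e. the cap is rounded down to a whole number of inode chunks before
 * being converted from blocks back to inodes.
 */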
2866 * Compute the desired size of an inode cluster buffer, which starts at
2867 * 8K and (on v5 filesystems) scales up with larger inode sizes.
2870 * Preserve the desired inode cluster size because the sparse inodes
2871 * feature uses that desired size (not the actual size) to compute the
2872 * sparse inode alignment. The mount code validates this value, so we
2873 * cannot change the behavior.
2875 igeo->inode_cluster_size_raw = XFS_INODE_BIG_CLUSTER_SIZE;
2876 if (xfs_sb_version_hascrc(&mp->m_sb)) {
2877 int new_size = igeo->inode_cluster_size_raw;
2879 new_size *= mp->m_sb.sb_inodesize / XFS_DINODE_MIN_SIZE;
2880 if (mp->m_sb.sb_inoalignmt >= XFS_B_TO_FSBT(mp, new_size))
2881 igeo->inode_cluster_size_raw = new_size;
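/*
 * A worked example with illustrative values, assuming the 8192-byte
 * XFS_INODE_BIG_CLUSTER_SIZE base and a 256-byte XFS_DINODE_MIN_SIZE: on a
 * v5 (CRC) filesystem with 4096-byte blocks and 512-byte inodes,
 *
 *   new_size = 8192 * (512 / 256) = 16384 bytes
 *
 * and the scaled size is only adopted if mkfs set sb_inoalignmt to at
 * least XFS_B_TO_FSBT(mp, 16384) = 4 blocks; otherwise the raw cluster
 * size stays at 8192 bytes.
 */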
2884 /* Calculate inode cluster ratios. */
2885 if (igeo->inode_cluster_size_raw > mp->m_sb.sb_blocksize)
2886 igeo->blocks_per_cluster = XFS_B_TO_FSBT(mp,
2887 igeo->inode_cluster_size_raw);
	else
2889 igeo->blocks_per_cluster = 1;
2890 igeo->inode_cluster_size = XFS_FSB_TO_B(mp, igeo->blocks_per_cluster);
2891 igeo->inodes_per_cluster = XFS_FSB_TO_INO(mp, igeo->blocks_per_cluster);
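/*
 * Continuing the illustrative 4096-byte block / 16384-byte cluster example:
 *
 *   blocks_per_cluster = XFS_B_TO_FSBT(mp, 16384) = 4
 *   inode_cluster_size = XFS_FSB_TO_B(mp, 4)      = 16384 bytes
 *   inodes_per_cluster = XFS_FSB_TO_INO(mp, 4)    = 32 (512-byte inodes)
 *
 * If the raw cluster size were not larger than the block size, all three
 * would collapse to a single block's worth.
 */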
2893 /* Calculate inode cluster alignment. */
2894 if (xfs_sb_version_hasalign(&mp->m_sb) &&
2895 mp->m_sb.sb_inoalignmt >= igeo->blocks_per_cluster)
2896 igeo->cluster_align = mp->m_sb.sb_inoalignmt;
	else
2898 igeo->cluster_align = 1;
2899 igeo->inoalign_mask = igeo->cluster_align - 1;
2900 igeo->cluster_align_inodes = XFS_FSB_TO_INO(mp, igeo->cluster_align);
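/*
 * Illustrative numbers, continuing the example above: with sb_inoalignmt =
 * 8 blocks and blocks_per_cluster = 4,
 *
 *   cluster_align        = 8 blocks
 *   inoalign_mask        = 7
 *   cluster_align_inodes = XFS_FSB_TO_INO(mp, 8) = 64
 *
 * Without the align feature (or with a too-small sb_inoalignmt) the
 * alignment falls back to a single block.
 */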
2903 * If we are using stripe alignment, check whether
2904 * the stripe unit is a multiple of the inode alignment
2906 if (mp->m_dalign && igeo->inoalign_mask &&
2907 !(mp->m_dalign & igeo->inoalign_mask))
2908 igeo->ialloc_align = mp->m_dalign;
	else
2910 igeo->ialloc_align = 0;
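/*
 * Illustrative numbers: with a stripe unit of m_dalign = 16 blocks and
 * inoalign_mask = 7 from above, 16 & 7 == 0, so the stripe unit is a
 * multiple of the inode alignment and ialloc_align becomes 16; otherwise
 * it stays 0 and stripe alignment is ignored for inode allocation.
 */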