// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_dir2.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_buf_item.h"
#include "xfs_trace.h"
#include "xfs_attr_leaf.h"
#include "xfs_filestream.h"
#include "xfs_rmap.h"
#include "xfs_ag_resv.h"
#include "xfs_refcount.h"
#include "xfs_icache.h"
#include "xfs_iomap.h"


kmem_zone_t		*xfs_bmap_free_item_zone;

/*
 * Miscellaneous helper functions
 */

/*
 * Compute and fill in the value of the maximum depth of a bmap btree
 * in this filesystem.  Done once, during mount.
 */
void
xfs_bmap_compute_maxlevels(
	xfs_mount_t	*mp,		/* file system mount structure */
	int		whichfork)	/* data or attr fork */
{
	int		level;		/* btree level */
	uint		maxblocks;	/* max blocks at this level */
	uint		maxleafents;	/* max leaf entries possible */
	int		maxrootrecs;	/* max records in root block */
	int		minleafrecs;	/* min records in leaf block */
	int		minnoderecs;	/* min records in node block */
	int		sz;		/* root block size */

	/*
	 * The maximum number of extents in a file, hence the maximum
	 * number of leaf entries, is controlled by the type of di_nextents
	 * (a signed 32-bit number, xfs_extnum_t), or by di_anextents
	 * (a signed 16-bit number, xfs_aextnum_t).
	 *
	 * Note that we can no longer assume that if we are in ATTR1 that
	 * the fork offset of all the inodes will be
	 * (xfs_default_attroffset(ip) >> 3) because we could have mounted
	 * with ATTR2 and then mounted back with ATTR1, keeping the
	 * di_forkoff's fixed but probably at various positions.  Therefore,
	 * for both ATTR1 and ATTR2 we have to assume the worst case scenario
	 * of a minimum size available.
	 */
	if (whichfork == XFS_DATA_FORK) {
		maxleafents = MAXEXTNUM;
		sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
	} else {
		maxleafents = MAXAEXTNUM;
		sz = XFS_BMDR_SPACE_CALC(MINABTPTRS);
	}
	maxrootrecs = xfs_bmdr_maxrecs(sz, 0);
	minleafrecs = mp->m_bmap_dmnr[0];
	minnoderecs = mp->m_bmap_dmnr[1];
	maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
	for (level = 1; maxblocks > 1; level++) {
		if (maxblocks <= maxrootrecs)
			maxblocks = 1;
		else
			maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
	}
	mp->m_bm_maxlevels[whichfork] = level;
}

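/*
 * Worked example (editor addition, hypothetical numbers, not part of the
 * original file): with a 4k block size the bmbt minimum record counts are
 * on the order of a hundred per block, so even the maximal 2^31 - 1 data
 * fork extents collapse to a depth of only about five levels.
 */
#if 0
#include <stdio.h>

static int ex_compute_maxlevels(unsigned long long maxleafents,
				unsigned int maxrootrecs,
				unsigned int minleafrecs,
				unsigned int minnoderecs)
{
	unsigned long long	maxblocks;
	int			level;

	/* worst case: every block holds only the minimum record count */
	maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
	for (level = 1; maxblocks > 1; level++) {
		if (maxblocks <= maxrootrecs)
			maxblocks = 1;	/* the rest fits in the inode root */
		else
			maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
	}
	return level;
}

int main(void)
{
	/* 2^31 - 1 extents; record counts are made up for the example */
	printf("%d\n", ex_compute_maxlevels(2147483647ULL, 9, 125, 250));
	return 0;
}
#endif
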
STATIC int				/* error */
xfs_bmbt_lookup_eq(
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*irec,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b = *irec;
	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}

STATIC int				/* error */
xfs_bmbt_lookup_first(
	struct xfs_btree_cur	*cur,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b.br_startoff = 0;
	cur->bc_rec.b.br_startblock = 0;
	cur->bc_rec.b.br_blockcount = 0;
	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
}

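/*
 * Usage sketch (editor addition, illustrative): callers pair these lookup
 * helpers with a "stat" check before acting on the cursor, as the update
 * and insert paths later in this file do.  "cur", "irec" and "stat" are
 * assumed to be set up by the hypothetical caller.
 */
#if 0
	error = xfs_bmbt_lookup_eq(cur, &irec, &stat);
	if (error)
		goto done;
	/* stat == 1: the record exists and the cursor points at it */
	if (stat != 1)
		return -EFSCORRUPTED;
	error = xfs_bmbt_update(cur, &irec);
#endif
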
/*
 * Check if the inode needs to be converted to btree format.
 */
static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
{
	return whichfork != XFS_COW_FORK &&
		XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
		XFS_IFORK_NEXTENTS(ip, whichfork) >
			XFS_IFORK_MAXEXT(ip, whichfork);
}

/*
 * Check if the inode should be converted to extent format.
 */
static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
{
	return whichfork != XFS_COW_FORK &&
		XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
		XFS_IFORK_NEXTENTS(ip, whichfork) <=
			XFS_IFORK_MAXEXT(ip, whichfork);
}

/*
 * Update the record referred to by cur to the value given by irec.
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int
xfs_bmbt_update(
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*irec)
{
	union xfs_btree_rec	rec;

	xfs_bmbt_disk_set_all(&rec.bmbt, irec);
	return xfs_btree_update(cur, &rec);
}

/*
 * Compute the worst-case number of indirect blocks that will be used
 * for ip's delayed extent of length "len".
 */
STATIC xfs_filblks_t
xfs_bmap_worst_indlen(
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_filblks_t	len)		/* delayed extent length */
{
	int		level;		/* btree level number */
	int		maxrecs;	/* maximum record count at this level */
	xfs_mount_t	*mp;		/* mount structure */
	xfs_filblks_t	rval;		/* return value */

	mp = ip->i_mount;
	maxrecs = mp->m_bmap_dmxr[0];
	for (level = 0, rval = 0;
	     level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
	     level++) {
		len += maxrecs - 1;
		do_div(len, maxrecs);
		rval += len;
		if (len == 1)
			return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
				level - 1;
		if (level == 0)
			maxrecs = mp->m_bmap_dmxr[1];
	}
	return rval;
}

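/*
 * Worked example (editor addition, hypothetical record counts): mirroring
 * the loop above with 125 records per leaf and 250 per node, a 1000-block
 * delayed extent needs ceil(1000/125) = 8 leaves plus ceil(8/250) = 1
 * node, then one block for each remaining level:
 * 8 + 1 + (5 - 1 - 1) = 12 indirect blocks in the worst case.
 */
#if 0
#define EX_DMXR_LEAF	125ULL
#define EX_DMXR_NODE	250ULL
#define EX_MAXLEVELS	5

static unsigned long long ex_worst_indlen(unsigned long long len)
{
	unsigned long long	rval = 0;
	unsigned long long	maxrecs = EX_DMXR_LEAF;
	int			level;

	for (level = 0; level < EX_MAXLEVELS; level++) {
		len = (len + maxrecs - 1) / maxrecs;	/* blocks this level */
		rval += len;
		if (len == 1)
			return rval + EX_MAXLEVELS - level - 1;
		if (level == 0)
			maxrecs = EX_DMXR_NODE;
	}
	return rval;
}
#endif
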
/*
 * Calculate the default attribute fork offset for newly created inodes.
 */
uint
xfs_default_attroffset(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint			offset;

	if (mp->m_sb.sb_inodesize == 256) {
		offset = XFS_LITINO(mp, ip->i_d.di_version) -
				XFS_BMDR_SPACE_CALC(MINABTPTRS);
	} else {
		offset = XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
	}

	ASSERT(offset < XFS_LITINO(mp, ip->i_d.di_version));
	return offset;
}

/*
 * Helper routine to reset inode di_forkoff field when switching
 * attribute fork from local to extent format - we reset it where
 * possible to make space available for inline data fork extents.
 */
STATIC void
xfs_bmap_forkoff_reset(
	xfs_inode_t	*ip,
	int		whichfork)
{
	if (whichfork == XFS_ATTR_FORK &&
	    ip->i_d.di_format != XFS_DINODE_FMT_DEV &&
	    ip->i_d.di_format != XFS_DINODE_FMT_BTREE) {
		uint	dfl_forkoff = xfs_default_attroffset(ip) >> 3;

		if (dfl_forkoff > ip->i_d.di_forkoff)
			ip->i_d.di_forkoff = dfl_forkoff;
	}
}

#ifdef DEBUG
STATIC struct xfs_buf *
xfs_bmap_get_bp(
	struct xfs_btree_cur	*cur,
	xfs_fsblock_t		bno)
{
	struct xfs_log_item	*lip;
	int			i;

	if (!cur)
		return NULL;

	for (i = 0; i < XFS_BTREE_MAXLEVELS; i++) {
		if (!cur->bc_bufs[i])
			break;
		if (XFS_BUF_ADDR(cur->bc_bufs[i]) == bno)
			return cur->bc_bufs[i];
	}

	/* Chase down all the log items to see if the bp is there */
	list_for_each_entry(lip, &cur->bc_tp->t_items, li_trans) {
		struct xfs_buf_log_item	*bip = (struct xfs_buf_log_item *)lip;

		if (bip->bli_item.li_type == XFS_LI_BUF &&
		    XFS_BUF_ADDR(bip->bli_buf) == bno)
			return bip->bli_buf;
	}

	return NULL;
}

STATIC void
xfs_check_block(
	struct xfs_btree_block	*block,	/* btree long form block pointer */
	xfs_mount_t		*mp,	/* mount point structure */
	int			root,	/* flag for root block */
	short			sz)	/* index array size */
{
	int			i, j, dmxr;
	__be64			*pp, *thispa;	/* pointer to block address */
	xfs_bmbt_key_t		*prevp, *keyp;

	ASSERT(be16_to_cpu(block->bb_level) > 0);

	prevp = NULL;
	for (i = 1; i <= xfs_btree_get_numrecs(block); i++) {
		dmxr = mp->m_bmap_dmxr[0];
		keyp = XFS_BMBT_KEY_ADDR(mp, block, i);

		if (prevp) {
			ASSERT(be64_to_cpu(prevp->br_startoff) <
			       be64_to_cpu(keyp->br_startoff));
		}
		prevp = keyp;

		/*
		 * Compare the block numbers to see if there are dups.
		 */
		if (root)
			pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz);
		else
			pp = XFS_BMBT_PTR_ADDR(mp, block, i, dmxr);

		for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) {
			if (root)
				thispa = XFS_BMAP_BROOT_PTR_ADDR(mp, block, j, sz);
			else
				thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr);
			if (*thispa == *pp) {
				xfs_warn(mp, "%s: thispa(%d) == pp(%d) %Ld",
					__func__, j, i,
					(unsigned long long)be64_to_cpu(*thispa));
				xfs_err(mp, "%s: ptrs are equal in node\n",
					__func__);
				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
			}
		}
	}
}

/*
 * Check that the extents for the inode ip are in the right order in all
 * btree leaves. This becomes prohibitively expensive for large extent count
 * files, so don't bother with inodes that have more than 10,000 extents in
 * them. The btree record ordering checks will still be done, so for such large
 * bmapbt constructs that is going to catch most corruptions.
 */
STATIC void
xfs_bmap_check_leaf_extents(
	xfs_btree_cur_t		*cur,	/* btree cursor or null */
	xfs_inode_t		*ip,	/* incore inode pointer */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_btree_block	*block;	/* current btree block */
	xfs_fsblock_t		bno;	/* block # of "block" */
	xfs_buf_t		*bp;	/* buffer for "block" */
	int			error;	/* error return value */
	xfs_extnum_t		i = 0, j;	/* index into the extents list */
	struct xfs_ifork	*ifp;	/* fork structure */
	int			level;	/* btree level, for checking */
	xfs_mount_t		*mp;	/* file system mount structure */
	__be64			*pp;	/* pointer to block address */
	xfs_bmbt_rec_t		*ep;	/* pointer to current extent */
	xfs_bmbt_rec_t		last = {0, 0}; /* last extent in prev block */
	xfs_bmbt_rec_t		*nextp;	/* pointer to next extent */
	int			bp_release = 0;

	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) {
		return;
	}

	/* skip large extent count inodes */
	if (ip->i_d.di_nextents > 10000)
		return;

	bno = NULLFSBLOCK;
	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	block = ifp->if_broot;
	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);

	ASSERT(bno != NULLFSBLOCK);
	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

	/*
	 * Go down the tree until leaf level is reached, following the first
	 * pointer (leftmost) at each level.
	 */
	while (level-- > 0) {
		/* See if buf is in cur first */
		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_btree_read_bufl(mp, NULL, bno, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
		if (level == 0)
			break;

		/*
		 * Check this block for basic sanity (increasing keys and
		 * no duplicate blocks).
		 */

		xfs_check_block(block, mp, 0, 0);
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		XFS_WANT_CORRUPTED_GOTO(mp,
					xfs_verify_fsbno(mp, bno), error0);
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
	}

	/*
	 * Here with bp and block set to the leftmost leaf node in the tree.
	 */

	/*
	 * Loop over all leaf nodes checking that all extents are in the right order.
	 */
	for (;;) {
		xfs_fsblock_t	nextbno;
		xfs_extnum_t	num_recs;

		num_recs = xfs_btree_get_numrecs(block);

		/*
		 * Read-ahead the next leaf block, if any.
		 */

		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);

		/*
		 * Check all the extents to make sure they are OK.
		 * If we had a previous block, the last entry should
		 * conform with the first entry in this one.
		 */

		ep = XFS_BMBT_REC_ADDR(mp, block, 1);
		if (i) {
			ASSERT(xfs_bmbt_disk_get_startoff(&last) +
			       xfs_bmbt_disk_get_blockcount(&last) <=
			       xfs_bmbt_disk_get_startoff(ep));
		}
		for (j = 1; j < num_recs; j++) {
			nextp = XFS_BMBT_REC_ADDR(mp, block, j + 1);
			ASSERT(xfs_bmbt_disk_get_startoff(ep) +
			       xfs_bmbt_disk_get_blockcount(ep) <=
			       xfs_bmbt_disk_get_startoff(nextp));
			ep = nextp;
		}

		last = *ep;
		i += num_recs;
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
		bno = nextbno;
		/*
		 * If we've reached the end, stop.
		 */
		if (bno == NULLFSBLOCK)
			break;

		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_btree_read_bufl(mp, NULL, bno, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
	}

	return;

error0:
	xfs_warn(mp, "%s: at error0", __func__);
	if (bp_release)
		xfs_trans_brelse(NULL, bp);
error_norelse:
	xfs_warn(mp, "%s: BAD after btree leaves for %d extents",
		__func__, i);
	xfs_err(mp, "%s: CORRUPTED BTREE OR SOMETHING", __func__);
	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	return;
}

/*
 * Validate that the bmbt_irecs being returned from bmapi are valid
 * given the caller's original parameters.  Specifically check the
 * ranges of the returned irecs to ensure that they only extend beyond
 * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
 */
STATIC void
xfs_bmap_validate_ret(
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	int			flags,
	xfs_bmbt_irec_t		*mval,
	int			nmap,
	int			ret_nmap)
{
	int			i;	/* index to map values */

	ASSERT(ret_nmap <= nmap);

	for (i = 0; i < ret_nmap; i++) {
		ASSERT(mval[i].br_blockcount > 0);
		if (!(flags & XFS_BMAPI_ENTIRE)) {
			ASSERT(mval[i].br_startoff >= bno);
			ASSERT(mval[i].br_blockcount <= len);
			ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
			       bno + len);
		} else {
			ASSERT(mval[i].br_startoff < bno + len);
			ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
			       bno);
		}
		ASSERT(i == 0 ||
		       mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
		       mval[i].br_startoff);
		ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
		       mval[i].br_startblock != HOLESTARTBLOCK);
		ASSERT(mval[i].br_state == XFS_EXT_NORM ||
		       mval[i].br_state == XFS_EXT_UNWRITTEN);
	}
}

#else
#define xfs_bmap_check_leaf_extents(cur, ip, whichfork)		do { } while (0)
#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)	do { } while (0)
#endif /* DEBUG */

/*
 * bmap free list manipulation functions
 */

/*
 * Add the extent to the list of extents to be free at transaction end.
 * The list is maintained sorted (by block number).
 */
void
__xfs_bmap_add_free(
	struct xfs_trans		*tp,
	xfs_fsblock_t			bno,
	xfs_filblks_t			len,
	const struct xfs_owner_info	*oinfo,
	bool				skip_discard)
{
	struct xfs_extent_free_item	*new;		/* new element */
#ifdef DEBUG
	struct xfs_mount		*mp = tp->t_mountp;
	xfs_agnumber_t			agno;
	xfs_agblock_t			agbno;

	ASSERT(bno != NULLFSBLOCK);
	ASSERT(len > 0);
	ASSERT(len <= MAXEXTLEN);
	ASSERT(!isnullstartblock(bno));
	agno = XFS_FSB_TO_AGNO(mp, bno);
	agbno = XFS_FSB_TO_AGBNO(mp, bno);
	ASSERT(agno < mp->m_sb.sb_agcount);
	ASSERT(agbno < mp->m_sb.sb_agblocks);
	ASSERT(len < mp->m_sb.sb_agblocks);
	ASSERT(agbno + len <= mp->m_sb.sb_agblocks);
#endif
	ASSERT(xfs_bmap_free_item_zone != NULL);

	new = kmem_zone_alloc(xfs_bmap_free_item_zone, 0);
	new->xefi_startblock = bno;
	new->xefi_blockcount = (xfs_extlen_t)len;
	if (oinfo)
		new->xefi_oinfo = *oinfo;
	else
		new->xefi_oinfo = XFS_RMAP_OINFO_SKIP_UPDATE;
	new->xefi_skip_discard = skip_discard;
	trace_xfs_bmap_free_defer(tp->t_mountp,
			XFS_FSB_TO_AGNO(tp->t_mountp, bno), 0,
			XFS_FSB_TO_AGBNO(tp->t_mountp, bno), len);
	xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_FREE, &new->xefi_list);
}

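/*
 * Illustrative sketch (editor addition, not kernel code): the shape of
 * the deferred-free bookkeeping above, reduced to a malloc'd userspace
 * list.  Sorting, owner info and discard handling are elided.
 */
#if 0
#include <stdlib.h>

struct ex_free_item {
	unsigned long long	startblock;
	unsigned int		blockcount;
	struct ex_free_item	*next;
};

static struct ex_free_item *ex_defer_free(struct ex_free_item *head,
					  unsigned long long bno,
					  unsigned int len)
{
	struct ex_free_item	*item = malloc(sizeof(*item));

	if (!item)
		return head;	/* the kernel zone alloc above cannot fail */
	item->startblock = bno;
	item->blockcount = len;
	item->next = head;	/* processed at "transaction end" */
	return item;
}
#endif
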
/*
 * Inode fork format manipulation functions
 */

/*
 * Convert the inode format to extent format if it currently is in btree format,
 * but the extent list is small enough that it fits into the extent format.
 *
 * Since the extents are already in-core, all we have to do is give up the space
 * for the btree root and pitch the leaf block.
 */
STATIC int				/* error */
xfs_bmap_btree_to_extents(
	struct xfs_trans	*tp,	/* transaction pointer */
	struct xfs_inode	*ip,	/* incore inode pointer */
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			*logflagsp, /* inode logging flags */
	int			whichfork)  /* data or attr fork */
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_btree_block	*rblock = ifp->if_broot;
	struct xfs_btree_block	*cblock;/* child btree block */
	xfs_fsblock_t		cbno;	/* child block number */
	xfs_buf_t		*cbp;	/* child block's buffer */
	int			error;	/* error return value */
	__be64			*pp;	/* ptr to block address */
	struct xfs_owner_info	oinfo;

	/* check if we actually need the extent format first: */
	if (!xfs_bmap_wants_extents(ip, whichfork))
		return 0;

	/* otherwise, convert */
	ASSERT(whichfork != XFS_COW_FORK);
	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
	ASSERT(be16_to_cpu(rblock->bb_level) == 1);
	ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
	ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1);

	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes);
	cbno = be64_to_cpu(*pp);
#ifdef DEBUG
	XFS_WANT_CORRUPTED_RETURN(cur->bc_mp,
			xfs_btree_check_lptr(cur, cbno, 1));
#endif
	error = xfs_btree_read_bufl(mp, tp, cbno, &cbp, XFS_BMAP_BTREE_REF,
				&xfs_bmbt_buf_ops);
	if (error)
		return error;
	cblock = XFS_BUF_TO_BLOCK(cbp);
	if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
		return error;
	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
	xfs_bmap_add_free(cur->bc_tp, cbno, 1, &oinfo);
	ip->i_d.di_nblocks--;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
	xfs_trans_binval(tp, cbp);
	if (cur->bc_bufs[0] == cbp)
		cur->bc_bufs[0] = NULL;
	xfs_iroot_realloc(ip, -1, whichfork);
	ASSERT(ifp->if_broot == NULL);
	ASSERT((ifp->if_flags & XFS_IFBROOT) == 0);
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
	*logflagsp |= XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
	return 0;
}

/*
 * Convert an extents-format file into a btree-format file.
 * The new file will have a root block (in the inode) and a single child block.
 */
STATIC int					/* error */
xfs_bmap_extents_to_btree(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode pointer */
	struct xfs_btree_cur	**curp,		/* cursor returned to caller */
	int			wasdel,		/* converting a delayed alloc */
	int			*logflagsp,	/* inode logging flags */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_btree_block	*ablock;	/* allocated (child) bt block */
	struct xfs_buf		*abp;		/* buffer for ablock */
	struct xfs_alloc_arg	args;		/* allocation arguments */
	struct xfs_bmbt_rec	*arp;		/* child record pointer */
	struct xfs_btree_block	*block;		/* btree root block */
	struct xfs_btree_cur	*cur;		/* bmap btree cursor */
	int			error;		/* error return value */
	struct xfs_ifork	*ifp;		/* inode fork pointer */
	struct xfs_bmbt_key	*kp;		/* root block key pointer */
	struct xfs_mount	*mp;		/* mount structure */
	xfs_bmbt_ptr_t		*pp;		/* root block address pointer */
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	rec;
	xfs_extnum_t		cnt = 0;

	mp = ip->i_mount;
	ASSERT(whichfork != XFS_COW_FORK);
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS);

	/*
	 * Make space in the inode incore. This needs to be undone if we fail
	 * to expand the root.
	 */
	xfs_iroot_realloc(ip, 1, whichfork);
	ifp->if_flags |= XFS_IFBROOT;

	/*
	 * Fill in the root.
	 */
	block = ifp->if_broot;
	xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL,
				 XFS_BTNUM_BMAP, 1, 1, ip->i_ino,
				 XFS_BTREE_LONG_PTRS);
	/*
	 * Need a cursor.  Can't allocate until bb_level is filled in.
	 */
	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
	cur->bc_private.b.flags = wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
	/*
	 * Convert to a btree with two levels, one record in root.
	 */
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_BTREE);
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = mp;
	xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, whichfork);
	if (tp->t_firstblock == NULLFSBLOCK) {
		args.type = XFS_ALLOCTYPE_START_BNO;
		args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
	} else if (tp->t_flags & XFS_TRANS_LOWMODE) {
		args.type = XFS_ALLOCTYPE_START_BNO;
		args.fsbno = tp->t_firstblock;
	} else {
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
		args.fsbno = tp->t_firstblock;
	}
	args.minlen = args.maxlen = args.prod = 1;
	args.wasdel = wasdel;
	*logflagsp = 0;
	error = xfs_alloc_vextent(&args);
	if (error)
		goto out_root_realloc;

	if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
		error = -ENOSPC;
		goto out_root_realloc;
	}

	/*
	 * Allocation can't fail, the space was reserved.
	 */
	ASSERT(tp->t_firstblock == NULLFSBLOCK ||
	       args.agno >= XFS_FSB_TO_AGNO(mp, tp->t_firstblock));
	tp->t_firstblock = args.fsbno;
	cur->bc_private.b.allocated++;
	ip->i_d.di_nblocks++;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
	abp = xfs_btree_get_bufl(mp, tp, args.fsbno);
	if (!abp) {
		error = -EFSCORRUPTED;
		goto out_unreserve_dquot;
	}

	/*
	 * Fill in the child block.
	 */
	abp->b_ops = &xfs_bmbt_buf_ops;
	ablock = XFS_BUF_TO_BLOCK(abp);
	xfs_btree_init_block_int(mp, ablock, abp->b_bn,
				XFS_BTNUM_BMAP, 0, 0, ip->i_ino,
				XFS_BTREE_LONG_PTRS);

	for_each_xfs_iext(ifp, &icur, &rec) {
		if (isnullstartblock(rec.br_startblock))
			continue;
		arp = XFS_BMBT_REC_ADDR(mp, ablock, 1 + cnt);
		xfs_bmbt_disk_set_all(arp, &rec);
		cnt++;
	}
	ASSERT(cnt == XFS_IFORK_NEXTENTS(ip, whichfork));
	xfs_btree_set_numrecs(ablock, cnt);

	/*
	 * Fill in the root key and pointer.
	 */
	kp = XFS_BMBT_KEY_ADDR(mp, block, 1);
	arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
	kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
	pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
						be16_to_cpu(block->bb_level)));
	*pp = cpu_to_be64(args.fsbno);

	/*
	 * Do all this logging at the end so that
	 * the root is at the right level.
	 */
	xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS);
	xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
	ASSERT(*curp == NULL);
	*curp = cur;
	*logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
	return 0;

out_unreserve_dquot:
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
out_root_realloc:
	xfs_iroot_realloc(ip, -1, whichfork);
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
	ASSERT(ifp->if_broot == NULL);
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);

	return error;
}

/*
 * Convert a local file to an extents file.
 * This code is out of bounds for data forks of regular files,
 * since the file data needs to get logged so things will stay consistent.
 * (The bmap-level manipulations are ok, though).
 */
void
xfs_bmap_local_to_extents_empty(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);

	ASSERT(whichfork != XFS_COW_FORK);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
	ASSERT(ifp->if_bytes == 0);
	ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0);

	xfs_bmap_forkoff_reset(ip, whichfork);
	ifp->if_flags &= ~XFS_IFINLINE;
	ifp->if_flags |= XFS_IFEXTENTS;
	ifp->if_u1.if_root = NULL;
	ifp->if_height = 0;
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
}

STATIC int				/* error */
xfs_bmap_local_to_extents(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_extlen_t	total,		/* total blocks needed by transaction */
	int		*logflagsp,	/* inode logging flags */
	int		whichfork,
	void		(*init_fn)(struct xfs_trans *tp,
				   struct xfs_buf *bp,
				   struct xfs_inode *ip,
				   struct xfs_ifork *ifp))
{
	int		error = 0;
	int		flags;		/* logging flags returned */
	struct xfs_ifork *ifp;		/* inode fork pointer */
	xfs_alloc_arg_t	args;		/* allocation arguments */
	xfs_buf_t	*bp;		/* buffer for extent block */
	struct xfs_bmbt_irec rec;
	struct xfs_iext_cursor icur;

	/*
	 * We don't want to deal with the case of keeping inode data inline yet.
	 * So sending the data fork of a regular inode is invalid.
	 */
	ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK));
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);

	if (!ifp->if_bytes) {
		xfs_bmap_local_to_extents_empty(tp, ip, whichfork);
		flags = XFS_ILOG_CORE;
		goto done;
	}

	flags = 0;
	error = 0;
	ASSERT((ifp->if_flags & (XFS_IFINLINE|XFS_IFEXTENTS)) == XFS_IFINLINE);
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = ip->i_mount;
	xfs_rmap_ino_owner(&args.oinfo, ip->i_ino, whichfork, 0);
	/*
	 * Allocate a block.  We know we need only one, since the
	 * file currently fits in an inode.
	 */
	if (tp->t_firstblock == NULLFSBLOCK) {
		args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino);
		args.type = XFS_ALLOCTYPE_START_BNO;
	} else {
		args.fsbno = tp->t_firstblock;
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
	}
	args.total = total;
	args.minlen = args.maxlen = args.prod = 1;
	error = xfs_alloc_vextent(&args);
	if (error)
		goto done;

	/* Can't fail, the space was reserved. */
	ASSERT(args.fsbno != NULLFSBLOCK);
	ASSERT(args.len == 1);
	tp->t_firstblock = args.fsbno;
	bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno);

	/*
	 * Initialize the block, copy the data and log the remote buffer.
	 *
	 * The callout is responsible for logging because the remote format
	 * might differ from the local format and thus we don't know how much to
	 * log here. Note that init_fn must also set the buffer log item type
	 * correctly.
	 */
	init_fn(tp, bp, ip, ifp);

	/* account for the change in fork size */
	xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
	xfs_bmap_local_to_extents_empty(tp, ip, whichfork);
	flags |= XFS_ILOG_CORE;

	ifp->if_u1.if_root = NULL;
	ifp->if_height = 0;

	rec.br_startoff = 0;
	rec.br_startblock = args.fsbno;
	rec.br_blockcount = 1;
	rec.br_state = XFS_EXT_NORM;
	xfs_iext_first(ifp, &icur);
	xfs_iext_insert(ip, &icur, &rec, 0);

	XFS_IFORK_NEXT_SET(ip, whichfork, 1);
	ip->i_d.di_nblocks = 1;
	xfs_trans_mod_dquot_byino(tp, ip,
		XFS_TRANS_DQ_BCOUNT, 1L);
	flags |= xfs_ilog_fext(whichfork);

done:
	*logflagsp = flags;
	return error;
}

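/*
 * Illustrative note (editor addition): the conversion above always
 * produces exactly one mapping, rooted at file offset zero.  In plain
 * numbers, with stand-in types:
 */
#if 0
struct ex_irec { unsigned long long off, bno, len; };

static struct ex_irec ex_local_to_extents_result(unsigned long long fsbno)
{
	/* inline data lands at offset 0 in the single allocated block */
	struct ex_irec rec = { .off = 0, .bno = fsbno, .len = 1 };

	return rec;
}
#endif
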
/*
 * Called from xfs_bmap_add_attrfork to handle btree format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_btree(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	xfs_btree_cur_t		*cur;		/* btree cursor */
	int			error;		/* error return value */
	xfs_mount_t		*mp;		/* file system mount struct */
	int			stat;		/* newroot status */

	mp = ip->i_mount;
	if (ip->i_df.if_broot_bytes <= XFS_IFORK_DSIZE(ip))
		*flags |= XFS_ILOG_DBROOT;
	else {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
		error = xfs_bmbt_lookup_first(cur, &stat);
		if (error)
			goto error0;
		/* must be at least one entry */
		XFS_WANT_CORRUPTED_GOTO(mp, stat == 1, error0);
		if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
			goto error0;
		if (stat == 0) {
			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
			return -ENOSPC;
		}
		cur->bc_private.b.allocated = 0;
		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	}
	return 0;
error0:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}

/*
 * Called from xfs_bmap_add_attrfork to handle extents format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_extents(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	xfs_btree_cur_t		*cur;		/* bmap btree cursor */
	int			error;		/* error return value */

	if (ip->i_d.di_nextents * sizeof(xfs_bmbt_rec_t) <= XFS_IFORK_DSIZE(ip))
		return 0;
	cur = NULL;
	error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, flags,
					  XFS_DATA_FORK);
	if (cur) {
		cur->bc_private.b.allocated = 0;
		xfs_btree_del_cursor(cur, error);
	}
	return error;
}

/*
 * Called from xfs_bmap_add_attrfork to handle local format files. Each
 * different data fork content type needs a different callout to do the
 * conversion. Some are basic and only require special block initialisation
 * callouts for the data formatting, others (directories) are so specialised
 * they handle everything themselves.
 *
 * XXX (dgc): investigate whether directory conversion can use the generic
 * formatting callout. It should be possible - it's just a very complex
 * implementation.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_local(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	struct xfs_da_args	dargs;		/* args for dir/attr code */

	if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip))
		return 0;

	if (S_ISDIR(VFS_I(ip)->i_mode)) {
		memset(&dargs, 0, sizeof(dargs));
		dargs.geo = ip->i_mount->m_dir_geo;
		dargs.dp = ip;
		dargs.total = dargs.geo->fsbcount;
		dargs.whichfork = XFS_DATA_FORK;
		dargs.trans = tp;
		return xfs_dir2_sf_to_block(&dargs);
	}

	if (S_ISLNK(VFS_I(ip)->i_mode))
		return xfs_bmap_local_to_extents(tp, ip, 1, flags,
						 XFS_ATTR_FORK,
						 xfs_symlink_local_to_remote);

	/* should only be called for types that support local format data */
	ASSERT(0);
	return -EFSCORRUPTED;
}

/* Set an inode attr fork offset based on the format */
int
xfs_bmap_set_attrforkoff(
	struct xfs_inode	*ip,
	int			size,
	int			*version)
{
	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_DEV:
		ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
		break;
	case XFS_DINODE_FMT_LOCAL:
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
		if (!ip->i_d.di_forkoff)
			ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
		else if ((ip->i_mount->m_flags & XFS_MOUNT_ATTR2) && version)
			*version = 2;
		break;
	default:
		ASSERT(0);
		return -EINVAL;
	}

	return 0;
}

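/*
 * Worked example (editor addition): di_forkoff is stored in 8-byte units,
 * hence the ">> 3" conversions above.  For a device inode,
 * sizeof(xfs_dev_t) is 4 bytes, so roundup(4, 8) >> 3 == 1, i.e. the
 * attribute fork starts 8 bytes into the inode's fork area.
 */
#if 0
static unsigned int ex_forkoff_for_dev(void)
{
	unsigned int devlen = 4;			/* sizeof(xfs_dev_t) */
	unsigned int bytes = (devlen + 7) & ~7U;	/* roundup(devlen, 8) */

	return bytes >> 3;				/* 1, i.e. 8 bytes */
}
#endif
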
/*
 * Convert inode from non-attributed to attributed.
 * Must not be in a transaction, ip must not be locked.
 */
int						/* error code */
xfs_bmap_add_attrfork(
	xfs_inode_t		*ip,		/* incore inode pointer */
	int			size,		/* space new attribute needs */
	int			rsvd)		/* xact may use reserved blks */
{
	xfs_mount_t		*mp;		/* mount structure */
	xfs_trans_t		*tp;		/* transaction pointer */
	int			blks;		/* space reservation */
	int			version = 1;	/* superblock attr version */
	int			logflags;	/* logging flags */
	int			error;		/* error return value */

	ASSERT(XFS_IFORK_Q(ip) == 0);

	mp = ip->i_mount;
	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));

	blks = XFS_ADDAFORK_SPACE_RES(mp);

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_addafork, blks, 0,
			rsvd ? XFS_TRANS_RESERVE : 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_trans_reserve_quota_nblks(tp, ip, blks, 0, rsvd ?
			XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
			XFS_QMOPT_RES_REGBLKS);
	if (error)
		goto trans_cancel;
	if (XFS_IFORK_Q(ip))
		goto trans_cancel;
	if (ip->i_d.di_anextents != 0) {
		error = -EFSCORRUPTED;
		goto trans_cancel;
	}
	if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) {
		/*
		 * For inodes coming from pre-6.2 filesystems.
		 */
		ASSERT(ip->i_d.di_aformat == 0);
		ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
	}

	xfs_trans_ijoin(tp, ip, 0);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	error = xfs_bmap_set_attrforkoff(ip, size, &version);
	if (error)
		goto trans_cancel;
	ASSERT(ip->i_afp == NULL);
	ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, 0);
	ip->i_afp->if_flags = XFS_IFEXTENTS;
	logflags = 0;
	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_LOCAL:
		error = xfs_bmap_add_attrfork_local(tp, ip, &logflags);
		break;
	case XFS_DINODE_FMT_EXTENTS:
		error = xfs_bmap_add_attrfork_extents(tp, ip, &logflags);
		break;
	case XFS_DINODE_FMT_BTREE:
		error = xfs_bmap_add_attrfork_btree(tp, ip, &logflags);
		break;
	default:
		error = 0;
		break;
	}
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	if (error)
		goto trans_cancel;
	if (!xfs_sb_version_hasattr(&mp->m_sb) ||
	   (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) {
		bool log_sb = false;

		spin_lock(&mp->m_sb_lock);
		if (!xfs_sb_version_hasattr(&mp->m_sb)) {
			xfs_sb_version_addattr(&mp->m_sb);
			log_sb = true;
		}
		if (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2) {
			xfs_sb_version_addattr2(&mp->m_sb);
			log_sb = true;
		}
		spin_unlock(&mp->m_sb_lock);
		if (log_sb)
			xfs_log_sb(tp);
	}

	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

trans_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

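/*
 * Usage sketch (editor addition, illustrative): a hypothetical caller
 * adds the attr fork only when it is absent, mirroring how the attr-set
 * path uses this function.
 */
#if 0
	if (!XFS_IFORK_Q(ip)) {
		error = xfs_bmap_add_attrfork(ip, size, rsvd);
		if (error)
			return error;
	}
#endif
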
/*
 * Internal and external extent tree search functions.
 */

/*
 * Read in extents from a btree-format inode.
 */
int
xfs_iread_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			state = xfs_bmap_fork_to_state(whichfork);
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	xfs_extnum_t		nextents = XFS_IFORK_NEXTENTS(ip, whichfork);
	struct xfs_btree_block	*block = ifp->if_broot;
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	new;
	xfs_fsblock_t		bno;
	struct xfs_buf		*bp;
	xfs_extnum_t		i, j;
	int			level;
	__be64			*pp;
	int			error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}

	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	level = be16_to_cpu(block->bb_level);
	if (unlikely(level == 0)) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);

	/*
	 * Go down the tree until leaf level is reached, following the first
	 * pointer (leftmost) at each level.
	 */
	while (level-- > 0) {
		error = xfs_btree_read_bufl(mp, tp, bno, &bp,
				XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
		if (error)
			goto out;
		block = XFS_BUF_TO_BLOCK(bp);
		if (level == 0)
			break;
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		XFS_WANT_CORRUPTED_GOTO(mp,
			xfs_verify_fsbno(mp, bno), out_brelse);
		xfs_trans_brelse(tp, bp);
	}

	/*
	 * Here with bp and block set to the leftmost leaf node in the tree.
	 */
	i = 0;
	xfs_iext_first(ifp, &icur);

	/*
	 * Loop over all leaf nodes.  Copy information to the extent records.
	 */
	for (;;) {
		xfs_bmbt_rec_t	*frp;
		xfs_fsblock_t	nextbno;
		xfs_extnum_t	num_recs;

		num_recs = xfs_btree_get_numrecs(block);
		if (unlikely(i + num_recs > nextents)) {
			xfs_warn(ip->i_mount,
				"corrupt dinode %Lu, (btree extents).",
				(unsigned long long) ip->i_ino);
			xfs_inode_verifier_error(ip, -EFSCORRUPTED,
					__func__, block, sizeof(*block),
					__this_address);
			error = -EFSCORRUPTED;
			goto out_brelse;
		}
		/*
		 * Read-ahead the next leaf block, if any.
		 */
		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
		if (nextbno != NULLFSBLOCK)
			xfs_btree_reada_bufl(mp, nextbno, 1,
					     &xfs_bmbt_buf_ops);
		/*
		 * Copy records into the extent records.
		 */
		frp = XFS_BMBT_REC_ADDR(mp, block, 1);
		for (j = 0; j < num_recs; j++, frp++, i++) {
			xfs_failaddr_t	fa;

			xfs_bmbt_disk_get_all(frp, &new);
			fa = xfs_bmap_validate_extent(ip, whichfork, &new);
			if (fa) {
				error = -EFSCORRUPTED;
				xfs_inode_verifier_error(ip, error,
						"xfs_iread_extents(2)",
						frp, sizeof(*frp), fa);
				goto out_brelse;
			}
			xfs_iext_insert(ip, &icur, &new, state);
			trace_xfs_read_extent(ip, &icur, state, _THIS_IP_);
			xfs_iext_next(ifp, &icur);
		}
		xfs_trans_brelse(tp, bp);
		bno = nextbno;
		/*
		 * If we've reached the end, stop.
		 */
		if (bno == NULLFSBLOCK)
			break;
		error = xfs_btree_read_bufl(mp, tp, bno, &bp,
				XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
		if (error)
			goto out;
		block = XFS_BUF_TO_BLOCK(bp);
	}

	if (i != XFS_IFORK_NEXTENTS(ip, whichfork)) {
		error = -EFSCORRUPTED;
		goto out;
	}
	ASSERT(i == xfs_iext_count(ifp));

	ifp->if_flags |= XFS_IFEXTENTS;
	return 0;

out_brelse:
	xfs_trans_brelse(tp, bp);
out:
	xfs_iext_destroy(ifp);
	return error;
}

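/*
 * Illustrative sketch (editor addition): the leftmost-leaf descent used
 * above (and in xfs_bmap_check_leaf_extents), with the I/O and
 * verification stripped out.  Types are stand-ins.
 */
#if 0
struct ex_block {
	int			level;		/* 0 == leaf */
	struct ex_block		*first_child;	/* stands in for bmbt ptr 1 */
};

static struct ex_block *ex_leftmost_leaf(struct ex_block *root)
{
	struct ex_block	*blk = root;

	while (blk->level > 0)
		blk = blk->first_child;
	return blk;
}
#endif
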
/*
 * Returns the relative block number of the first unused block(s) in the given
 * fork with at least "len" logically contiguous blocks free. This is the
 * lowest-address hole if the fork has holes, else the first block past the end
 * of fork. Return 0 if the fork is currently local (in-inode).
 */
int						/* error */
xfs_bmap_first_unused(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_extlen_t		len,		/* size of hole to find */
	xfs_fileoff_t		*first_unused,	/* unused block */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;
	xfs_fileoff_t		lastaddr = 0;
	xfs_fileoff_t		lowest, max;
	int			error;

	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE ||
	       XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ||
	       XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);

	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
		*first_unused = 0;
		return 0;
	}

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	lowest = max = *first_unused;
	for_each_xfs_iext(ifp, &icur, &got) {
		/*
		 * See if the hole before this extent will work.
		 */
		if (got.br_startoff >= lowest + len &&
		    got.br_startoff - max >= len)
			break;
		lastaddr = got.br_startoff + got.br_blockcount;
		max = XFS_FILEOFF_MAX(lastaddr, lowest);
	}

	*first_unused = max;
	return 0;
}

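/*
 * Illustrative sketch (editor addition): the hole search above as a
 * userspace loop over a startoff-sorted extent array.
 */
#if 0
struct ex_extent { unsigned long long startoff, blockcount; };

static unsigned long long ex_first_unused(const struct ex_extent *ext,
					  int nextents,
					  unsigned long long len)
{
	unsigned long long	lowest = 0, max = 0;
	int			i;

	for (i = 0; i < nextents; i++) {
		unsigned long long end = ext[i].startoff + ext[i].blockcount;

		/* is the hole before this extent at least "len" blocks? */
		if (ext[i].startoff >= lowest + len &&
		    ext[i].startoff - max >= len)
			break;
		if (end > max)
			max = end;
	}
	return max;	/* start of the first big-enough hole, or EOF */
}
#endif
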
/*
 * Returns the file-relative block number of the last block - 1 before
 * last_block (input value) in the file.
 * This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int						/* error */
xfs_bmap_last_before(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_fileoff_t		*last_block,	/* last block */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;
	int			error;

	switch (XFS_IFORK_FORMAT(ip, whichfork)) {
	case XFS_DINODE_FMT_LOCAL:
		*last_block = 0;
		return 0;
	case XFS_DINODE_FMT_BTREE:
	case XFS_DINODE_FMT_EXTENTS:
		break;
	default:
		return -EIO;
	}

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	if (!xfs_iext_lookup_extent_before(ip, ifp, last_block, &icur, &got))
		*last_block = 0;
	return 0;
}

int
xfs_bmap_last_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*rec,
	int			*is_empty)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_iext_cursor	icur;
	int			error;

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	xfs_iext_last(ifp, &icur);
	if (!xfs_iext_get_extent(ifp, &icur, rec))
		*is_empty = 1;
	else
		*is_empty = 0;
	return 0;
}

/*
 * Check the last inode extent to determine whether this allocation will result
 * in blocks being allocated at the end of the file. When we allocate new data
 * blocks at the end of the file which do not start at the previous data block,
 * we will try to align the new blocks at stripe unit boundaries.
 *
 * Returns 1 in bma->aeof if the file (fork) is empty as any new write will be
 * at, or past the EOF.
 */
STATIC int
xfs_bmap_isaeof(
	struct xfs_bmalloca	*bma,
	int			whichfork)
{
	struct xfs_bmbt_irec	rec;
	int			is_empty;
	int			error;

	bma->aeof = false;
	error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
				     &is_empty);
	if (error)
		return error;

	if (is_empty) {
		bma->aeof = true;
		return 0;
	}

	/*
	 * Check if we are allocating at or past the last extent, or at least
	 * into the last delayed allocated extent.
	 */
	bma->aeof = bma->offset >= rec.br_startoff + rec.br_blockcount ||
		(bma->offset >= rec.br_startoff &&
		 isnullstartblock(rec.br_startblock));
	return 0;
}

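/*
 * Illustrative sketch (editor addition): the at-or-past-EOF test above in
 * isolation.  The "last_*" parameters describe the final extent in the
 * fork.
 */
#if 0
static int ex_is_aeof(unsigned long long offset,
		      unsigned long long last_off,
		      unsigned long long last_len,
		      int last_is_delalloc)
{
	return offset >= last_off + last_len ||
	       (offset >= last_off && last_is_delalloc);
}
#endif
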
/*
 * Returns the file-relative block number of the first block past eof in
 * the file.  This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int
xfs_bmap_last_offset(
	struct xfs_inode	*ip,
	xfs_fileoff_t		*last_block,
	int			whichfork)
{
	struct xfs_bmbt_irec	rec;
	int			is_empty;
	int			error;

	*last_block = 0;

	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL)
		return 0;

	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
	    XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
		return -EIO;

	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty);
	if (error || is_empty)
		return error;

	*last_block = rec.br_startoff + rec.br_blockcount;
	return 0;
}

/*
 * Returns whether the selected fork of the inode has exactly one
 * block or not.  For the data fork we check this matches di_size,
 * implying the file's range is 0..bsize-1.
 */
int					/* 1=>1 block, 0=>otherwise */
xfs_bmap_one_block(
	xfs_inode_t	*ip,		/* incore inode */
	int		whichfork)	/* data or attr fork */
{
	struct xfs_ifork *ifp;		/* inode fork pointer */
	int		rval;		/* return value */
	xfs_bmbt_irec_t	s;		/* internal version of extent */
	struct xfs_iext_cursor icur;

#ifndef DEBUG
	if (whichfork == XFS_DATA_FORK)
		return XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize;
#endif	/* !DEBUG */
	if (XFS_IFORK_NEXTENTS(ip, whichfork) != 1)
		return 0;
	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
		return 0;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
	xfs_iext_first(ifp, &icur);
	xfs_iext_get_extent(ifp, &icur, &s);
	rval = s.br_startoff == 0 && s.br_blockcount == 1;
	if (rval && whichfork == XFS_DATA_FORK)
		ASSERT(XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize);
	return rval;
}

/*
 * Extent tree manipulation functions used during allocation.
 */

/*
 * Convert a delayed allocation to a real allocation.
 */
STATIC int				/* error */
xfs_bmap_add_extent_delay_real(
	struct xfs_bmalloca	*bma,
	int			whichfork)
{
	struct xfs_bmbt_irec	*new = &bma->got;
	int			error;	/* error return value */
	int			i;	/* temp state */
	struct xfs_ifork	*ifp;	/* inode fork pointer */
	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
					/* left is 0, right is 1, prev is 2 */
	int			rval = 0;	/* return value (logging flags) */
	int			state = xfs_bmap_fork_to_state(whichfork);
	xfs_filblks_t		da_new; /* new count del alloc blocks used */
	xfs_filblks_t		da_old; /* old count del alloc blocks used */
	xfs_filblks_t		temp = 0;	/* value for da_new calculations */
	int			tmp_rval;	/* partial logging flags */
	struct xfs_mount	*mp;
	xfs_extnum_t		*nextents;
	struct xfs_bmbt_irec	old;

	mp = bma->ip->i_mount;
	ifp = XFS_IFORK_PTR(bma->ip, whichfork);
	ASSERT(whichfork != XFS_ATTR_FORK);
	nextents = (whichfork == XFS_COW_FORK ? &bma->ip->i_cnextents :
						&bma->ip->i_d.di_nextents);

	ASSERT(!isnullstartblock(new->br_startblock));
	ASSERT(!bma->cur ||
	       (bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));

	XFS_STATS_INC(mp, xs_add_exlist);

#define	LEFT		r[0]
#define	RIGHT		r[1]
#define	PREV		r[2]

	/*
	 * Set up a bunch of variables to make the tests simpler.
	 */
	xfs_iext_get_extent(ifp, &bma->icur, &PREV);
	new_endoff = new->br_startoff + new->br_blockcount;
	ASSERT(isnullstartblock(PREV.br_startblock));
	ASSERT(PREV.br_startoff <= new->br_startoff);
	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);

	da_old = startblockval(PREV.br_startblock);
	da_new = 0;

	/*
	 * Set flags determining what part of the previous delayed allocation
	 * extent is being replaced by a real allocation.
	 */
	if (PREV.br_startoff == new->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
		state |= BMAP_RIGHT_FILLING;

	/*
	 * Check and set flags if this segment has a left neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 */
	if (xfs_iext_peek_prev_extent(ifp, &bma->icur, &LEFT)) {
		state |= BMAP_LEFT_VALID;
		if (isnullstartblock(LEFT.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
	    LEFT.br_state == new->br_state &&
	    LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
		state |= BMAP_LEFT_CONTIG;

	/*
	 * Check and set flags if this segment has a right neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 * Also check for all-three-contiguous being too large.
	 */
	if (xfs_iext_peek_next_extent(ifp, &bma->icur, &RIGHT)) {
		state |= BMAP_RIGHT_VALID;
		if (isnullstartblock(RIGHT.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
	    new_endoff == RIGHT.br_startoff &&
	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
	    new->br_state == RIGHT.br_state &&
	    new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING)) !=
		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING) ||
	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
			<= MAXEXTLEN))
		state |= BMAP_RIGHT_CONTIG;

	error = 0;
	/*
	 * Switch out based on the FILLING and CONTIG state bits.
	 */
	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The left and right neighbors are both contiguous with new.
		 */
		LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;

		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
		(*nextents)--;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_btree_delete(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_btree_decrement(bma->cur, 0, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The left neighbor is contiguous, the right is not.
		 */
		old = LEFT;
		LEFT.br_blockcount += PREV.br_blockcount;

		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The right neighbor is contiguous, the left is not. Take care
		 * with delay -> unwritten extent allocation here because the
		 * delalloc record we are overwriting is always written.
		 */
		PREV.br_startblock = new->br_startblock;
		PREV.br_blockcount += RIGHT.br_blockcount;
		PREV.br_state = new->br_state;

		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, &PREV);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * Neither the left nor right neighbors are contiguous with
		 * the new one.
		 */
		PREV.br_startblock = new->br_startblock;
		PREV.br_state = new->br_state;
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
		(*nextents)++;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Filling in the first part of a previous delayed allocation.
		 * The left neighbor is contiguous.
		 */
		old = LEFT;
		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
				startblockval(PREV.br_startblock));

		LEFT.br_blockcount += new->br_blockcount;

		PREV.br_blockcount = temp;
		PREV.br_startoff += new->br_blockcount;
		PREV.br_startblock = nullstartblock(da_new);

		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING:
		/*
		 * Filling in the first part of a previous delayed allocation.
		 * The left neighbor is not contiguous.
		 */
		xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
		(*nextents)++;
		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
					&bma->cur, 1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock) -
			(bma->cur ? bma->cur->bc_private.b.allocated : 0));

		PREV.br_startoff = new_endoff;
		PREV.br_blockcount = temp;
		PREV.br_startblock = nullstartblock(da_new);
		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
		xfs_iext_prev(ifp, &bma->icur);
		break;

	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in the last part of a previous delayed allocation.
		 * The right neighbor is contiguous with the new allocation.
		 */
		old = RIGHT;
		RIGHT.br_startoff = new->br_startoff;
		RIGHT.br_startblock = new->br_startblock;
		RIGHT.br_blockcount += new->br_blockcount;

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, &RIGHT);
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock));

		PREV.br_blockcount = temp;
		PREV.br_startblock = nullstartblock(da_new);

		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &RIGHT);
		break;

	case BMAP_RIGHT_FILLING:
		/*
		 * Filling in the last part of a previous delayed allocation.
		 * The right neighbor is not contiguous.
		 */
		xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
		(*nextents)++;
		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
				&bma->cur, 1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock) -
			(bma->cur ? bma->cur->bc_private.b.allocated : 0));

		PREV.br_startblock = nullstartblock(da_new);
		PREV.br_blockcount = temp;
		xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
		xfs_iext_next(ifp, &bma->icur);
		break;

	case 0:
		/*
		 * Filling in the middle part of a previous delayed allocation.
		 * Contiguity is impossible here.
		 * This case is avoided almost all the time.
		 *
		 * We start with a delayed allocation:
		 *
		 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+
		 *  PREV @ idx
		 *
		 * and we are allocating:
		 *                     +rrrrrrrrrrrrrrrrr+
		 *                            new
		 *
		 * and we set it up for insertion as:
		 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+
		 *                            new
		 *  PREV @ idx          LEFT              RIGHT
		 *                      inserted at idx + 1
		 */
		old = PREV;

		/* LEFT is the new middle */
		LEFT = *new;

		/* RIGHT is the new right */
		RIGHT.br_state = PREV.br_state;
		RIGHT.br_startoff = new_endoff;
		RIGHT.br_blockcount =
			PREV.br_startoff + PREV.br_blockcount - new_endoff;
		RIGHT.br_startblock =
			nullstartblock(xfs_bmap_worst_indlen(bma->ip,
					RIGHT.br_blockcount));

		/* truncate PREV */
		PREV.br_blockcount = new->br_startoff - PREV.br_startoff;
		PREV.br_startblock =
			nullstartblock(xfs_bmap_worst_indlen(bma->ip,
					PREV.br_blockcount));
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);

		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_insert(bma->ip, &bma->icur, &RIGHT, state);
		xfs_iext_insert(bma->ip, &bma->icur, &LEFT, state);
		(*nextents)++;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
					&bma->cur, 1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		da_new = startblockval(PREV.br_startblock) +
			 startblockval(RIGHT.br_startblock);
		break;

	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_CONTIG:
	case BMAP_RIGHT_CONTIG:
		/*
		 * These cases are all impossible.
		 */
		ASSERT(0);
	}

	/* add reverse mapping unless caller opted out */
	if (!(bma->flags & XFS_BMAPI_NORMAP))
		xfs_rmap_map_extent(bma->tp, bma->ip, whichfork, new);

	/* convert to a btree if necessary */
	if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
		int	tmp_logflags;	/* partial log flag return val */

		ASSERT(bma->cur == NULL);
		error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
				&bma->cur, da_old > 0, &tmp_logflags,
				whichfork);
		bma->logflags |= tmp_logflags;
		if (error)
			goto done;
	}

	if (da_new != da_old)
		xfs_mod_delalloc(mp, (int64_t)da_new - da_old);

	if (bma->cur) {
		da_new += bma->cur->bc_private.b.allocated;
		bma->cur->bc_private.b.allocated = 0;
	}

	/* adjust for changes in reserved delayed indirect blocks */
	if (da_new != da_old) {
		ASSERT(state == 0 || da_new < da_old);
		error = xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new),
				false);
	}

	xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
done:
	if (whichfork != XFS_COW_FORK)
		bma->logflags |= rval;
	return error;
#undef	LEFT
#undef	RIGHT
#undef	PREV
}

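/*
 * Illustrative sketch (editor addition, not kernel code): the left/right
 * neighbour merge test that drives the switch above, reduced to plain C.
 * Field names and the MAXEXTLEN-style limit are stand-ins.
 */
#if 0
struct ex_irec {
	unsigned long long	off;	/* br_startoff */
	unsigned long long	bno;	/* br_startblock */
	unsigned long long	len;	/* br_blockcount */
	int			state;	/* br_state */
};

static int ex_left_contig(const struct ex_irec *left,
			  const struct ex_irec *new,
			  unsigned long long maxlen)
{
	return left->off + left->len == new->off &&	/* adjacent offsets */
	       left->bno + left->len == new->bno &&	/* adjacent blocks */
	       left->state == new->state &&		/* same written state */
	       left->len + new->len <= maxlen;		/* merged fits */
}
#endif
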
2033 * Convert an unwritten allocation to a real allocation or vice versa.
2036 xfs_bmap_add_extent_unwritten_real(
2037 struct xfs_trans *tp,
2038 xfs_inode_t *ip, /* incore inode pointer */
2040 struct xfs_iext_cursor *icur,
2041 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
2042 xfs_bmbt_irec_t *new, /* new data to add to file extents */
2043 int *logflagsp) /* inode logging flags */
2045 xfs_btree_cur_t *cur; /* btree cursor */
2046 int error; /* error return value */
2047 int i; /* temp state */
2048 struct xfs_ifork *ifp; /* inode fork pointer */
2049 xfs_fileoff_t new_endoff; /* end offset of new entry */
2050 xfs_bmbt_irec_t r[3]; /* neighbor extent entries */
2051 /* left is 0, right is 1, prev is 2 */
2052 int rval=0; /* return value (logging flags) */
2053 int state = xfs_bmap_fork_to_state(whichfork);
2054 struct xfs_mount *mp = ip->i_mount;
2055 struct xfs_bmbt_irec old;
2060 ifp = XFS_IFORK_PTR(ip, whichfork);
2062 ASSERT(!isnullstartblock(new->br_startblock));
2064 XFS_STATS_INC(mp, xs_add_exlist);
2071 * Set up a bunch of variables to make the tests simpler.
2074 xfs_iext_get_extent(ifp, icur, &PREV);
2075 ASSERT(new->br_state != PREV.br_state);
2076 new_endoff = new->br_startoff + new->br_blockcount;
2077 ASSERT(PREV.br_startoff <= new->br_startoff);
2078 ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
2081 * Set flags determining what part of the previous oldext allocation
2082 * extent is being replaced by a newext allocation.
2084 if (PREV.br_startoff == new->br_startoff)
2085 state |= BMAP_LEFT_FILLING;
2086 if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
2087 state |= BMAP_RIGHT_FILLING;
2090 * Check and set flags if this segment has a left neighbor.
2091 * Don't set contiguous if the combined extent would be too large.
2093 if (xfs_iext_peek_prev_extent(ifp, icur, &LEFT)) {
2094 state |= BMAP_LEFT_VALID;
2095 if (isnullstartblock(LEFT.br_startblock))
2096 state |= BMAP_LEFT_DELAY;
2099 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
2100 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
2101 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
2102 LEFT.br_state == new->br_state &&
2103 LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
2104 state |= BMAP_LEFT_CONTIG;
2107 * Check and set flags if this segment has a right neighbor.
2108 * Don't set contiguous if the combined extent would be too large.
2109 * Also check for all-three-contiguous being too large.
2111 if (xfs_iext_peek_next_extent(ifp, icur, &RIGHT)) {
2112 state |= BMAP_RIGHT_VALID;
2113 if (isnullstartblock(RIGHT.br_startblock))
2114 state |= BMAP_RIGHT_DELAY;
2117 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
2118 new_endoff == RIGHT.br_startoff &&
2119 new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
2120 new->br_state == RIGHT.br_state &&
2121 new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
2122 ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
2123 BMAP_RIGHT_FILLING)) !=
2124 (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
2125 BMAP_RIGHT_FILLING) ||
2126 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
2128 state |= BMAP_RIGHT_CONTIG;
2131 * Switch out based on the FILLING and CONTIG state bits.
2133 switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
2134 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
2135 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
2136 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2138 * Setting all of a previous oldext extent to newext.
2139 * The left and right neighbors are both contiguous with new.
2141 LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;
2143 xfs_iext_remove(ip, icur, state);
2144 xfs_iext_remove(ip, icur, state);
2145 xfs_iext_prev(ifp, icur);
2146 xfs_iext_update_extent(ip, state, icur, &LEFT);
2147 XFS_IFORK_NEXT_SET(ip, whichfork,
2148 XFS_IFORK_NEXTENTS(ip, whichfork) - 2);
2150 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2152 rval = XFS_ILOG_CORE;
2153 error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
2156 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2157 if ((error = xfs_btree_delete(cur, &i)))
2159 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2160 if ((error = xfs_btree_decrement(cur, 0, &i)))
2162 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2163 if ((error = xfs_btree_delete(cur, &i)))
2165 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2166 if ((error = xfs_btree_decrement(cur, 0, &i)))
2168 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2169 error = xfs_bmbt_update(cur, &LEFT);
2175 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2177 * Setting all of a previous oldext extent to newext.
2178 * The left neighbor is contiguous, the right is not.
2180 LEFT.br_blockcount += PREV.br_blockcount;
2182 xfs_iext_remove(ip, icur, state);
2183 xfs_iext_prev(ifp, icur);
2184 xfs_iext_update_extent(ip, state, icur, &LEFT);
2185 XFS_IFORK_NEXT_SET(ip, whichfork,
2186 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
2187 if (cur == NULL)
2188 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2189 else {
2190 rval = XFS_ILOG_CORE;
2191 error = xfs_bmbt_lookup_eq(cur, &PREV, &i);
2192 if (error)
2193 goto done;
2194 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2195 if ((error = xfs_btree_delete(cur, &i)))
2196 goto done;
2197 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2198 if ((error = xfs_btree_decrement(cur, 0, &i)))
2199 goto done;
2200 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2201 error = xfs_bmbt_update(cur, &LEFT);
2202 if (error)
2203 goto done;
2204 }
2205 break;
2207 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2209 * Setting all of a previous oldext extent to newext.
2210 * The right neighbor is contiguous, the left is not.
2212 PREV.br_blockcount += RIGHT.br_blockcount;
2213 PREV.br_state = new->br_state;
2215 xfs_iext_next(ifp, icur);
2216 xfs_iext_remove(ip, icur, state);
2217 xfs_iext_prev(ifp, icur);
2218 xfs_iext_update_extent(ip, state, icur, &PREV);
2220 XFS_IFORK_NEXT_SET(ip, whichfork,
2221 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
2222 if (cur == NULL)
2223 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2224 else {
2225 rval = XFS_ILOG_CORE;
2226 error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
2227 if (error)
2228 goto done;
2229 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2230 if ((error = xfs_btree_delete(cur, &i)))
2231 goto done;
2232 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2233 if ((error = xfs_btree_decrement(cur, 0, &i)))
2234 goto done;
2235 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2236 error = xfs_bmbt_update(cur, &PREV);
2237 if (error)
2238 goto done;
2239 }
2240 break;
2242 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
2244 * Setting all of a previous oldext extent to newext.
2245 * Neither the left nor right neighbors are contiguous with
2246 * the new one.
2248 PREV.br_state = new->br_state;
2249 xfs_iext_update_extent(ip, state, icur, &PREV);
2251 if (cur == NULL)
2252 rval = XFS_ILOG_DEXT;
2253 else {
2254 rval = 0;
2255 error = xfs_bmbt_lookup_eq(cur, new, &i);
2256 if (error)
2257 goto done;
2258 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2259 error = xfs_bmbt_update(cur, &PREV);
2260 if (error)
2261 goto done;
2262 }
2263 break;
2265 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
2267 * Setting the first part of a previous oldext extent to newext.
2268 * The left neighbor is contiguous.
2270 LEFT.br_blockcount += new->br_blockcount;
2272 old = PREV;
2273 PREV.br_startoff += new->br_blockcount;
2274 PREV.br_startblock += new->br_blockcount;
2275 PREV.br_blockcount -= new->br_blockcount;
2277 xfs_iext_update_extent(ip, state, icur, &PREV);
2278 xfs_iext_prev(ifp, icur);
2279 xfs_iext_update_extent(ip, state, icur, &LEFT);
2281 if (cur == NULL)
2282 rval = XFS_ILOG_DEXT;
2283 else {
2284 rval = 0;
2285 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2286 if (error)
2287 goto done;
2288 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2289 error = xfs_bmbt_update(cur, &PREV);
2290 if (error)
2291 goto done;
2292 error = xfs_btree_decrement(cur, 0, &i);
2293 if (error)
2294 goto done;
2295 error = xfs_bmbt_update(cur, &LEFT);
2296 if (error)
2297 goto done;
2298 }
2299 break;
2301 case BMAP_LEFT_FILLING:
2303 * Setting the first part of a previous oldext extent to newext.
2304 * The left neighbor is not contiguous.
2306 old = PREV;
2307 PREV.br_startoff += new->br_blockcount;
2308 PREV.br_startblock += new->br_blockcount;
2309 PREV.br_blockcount -= new->br_blockcount;
2311 xfs_iext_update_extent(ip, state, icur, &PREV);
2312 xfs_iext_insert(ip, icur, new, state);
2313 XFS_IFORK_NEXT_SET(ip, whichfork,
2314 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
2315 if (cur == NULL)
2316 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2317 else {
2318 rval = XFS_ILOG_CORE;
2319 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2320 if (error)
2321 goto done;
2322 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2323 error = xfs_bmbt_update(cur, &PREV);
2324 if (error)
2325 goto done;
2326 cur->bc_rec.b = *new;
2327 if ((error = xfs_btree_insert(cur, &i)))
2328 goto done;
2329 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2330 }
2331 break;
2333 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2335 * Setting the last part of a previous oldext extent to newext.
2336 * The right neighbor is contiguous with the new allocation.
2338 old = PREV;
2339 PREV.br_blockcount -= new->br_blockcount;
2341 RIGHT.br_startoff = new->br_startoff;
2342 RIGHT.br_startblock = new->br_startblock;
2343 RIGHT.br_blockcount += new->br_blockcount;
2345 xfs_iext_update_extent(ip, state, icur, &PREV);
2346 xfs_iext_next(ifp, icur);
2347 xfs_iext_update_extent(ip, state, icur, &RIGHT);
2349 if (cur == NULL)
2350 rval = XFS_ILOG_DEXT;
2351 else {
2352 rval = 0;
2353 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2354 if (error)
2355 goto done;
2356 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2357 error = xfs_bmbt_update(cur, &PREV);
2358 if (error)
2359 goto done;
2360 error = xfs_btree_increment(cur, 0, &i);
2361 if (error)
2362 goto done;
2363 error = xfs_bmbt_update(cur, &RIGHT);
2364 if (error)
2365 goto done;
2366 }
2367 break;
2369 case BMAP_RIGHT_FILLING:
2371 * Setting the last part of a previous oldext extent to newext.
2372 * The right neighbor is not contiguous.
2374 old = PREV;
2375 PREV.br_blockcount -= new->br_blockcount;
2377 xfs_iext_update_extent(ip, state, icur, &PREV);
2378 xfs_iext_next(ifp, icur);
2379 xfs_iext_insert(ip, icur, new, state);
2381 XFS_IFORK_NEXT_SET(ip, whichfork,
2382 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
2383 if (cur == NULL)
2384 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2385 else {
2386 rval = XFS_ILOG_CORE;
2387 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2388 if (error)
2389 goto done;
2390 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2391 error = xfs_bmbt_update(cur, &PREV);
2392 if (error)
2393 goto done;
2394 error = xfs_bmbt_lookup_eq(cur, new, &i);
2395 if (error)
2396 goto done;
2397 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
2398 if ((error = xfs_btree_insert(cur, &i)))
2399 goto done;
2400 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2401 }
2402 break;
2404 case 0:
2406 * Setting the middle part of a previous oldext extent to
2407 * newext. Contiguity is impossible here.
2408 * One extent becomes three extents.
2410 old = PREV;
2411 PREV.br_blockcount = new->br_startoff - PREV.br_startoff;
2413 r[0] = *new;
2414 r[1].br_startoff = new_endoff;
2415 r[1].br_blockcount =
2416 old.br_startoff + old.br_blockcount - new_endoff;
2417 r[1].br_startblock = new->br_startblock + new->br_blockcount;
2418 r[1].br_state = PREV.br_state;
2420 xfs_iext_update_extent(ip, state, icur, &PREV);
2421 xfs_iext_next(ifp, icur);
2422 xfs_iext_insert(ip, icur, &r[1], state);
2423 xfs_iext_insert(ip, icur, &r[0], state);
2425 XFS_IFORK_NEXT_SET(ip, whichfork,
2426 XFS_IFORK_NEXTENTS(ip, whichfork) + 2);
2427 if (cur == NULL)
2428 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2429 else {
2430 rval = XFS_ILOG_CORE;
2431 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2432 if (error)
2433 goto done;
2434 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2435 /* new right extent - oldext */
2436 error = xfs_bmbt_update(cur, &r[1]);
2437 if (error)
2438 goto done;
2439 /* new left extent - oldext */
2440 cur->bc_rec.b = PREV;
2441 if ((error = xfs_btree_insert(cur, &i)))
2442 goto done;
2443 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2445 * Reset the cursor to the position of the new extent
2446 * we are about to insert as we can't trust it after
2447 * the previous insert.
2449 error = xfs_bmbt_lookup_eq(cur, new, &i);
2450 if (error)
2451 goto done;
2452 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
2453 /* new middle extent - newext */
2454 if ((error = xfs_btree_insert(cur, &i)))
2455 goto done;
2456 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2457 }
2458 break;
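/*
 * Editor's note: the case 0 split pictured, for orientation. PREV
 * covered [a, d) as oldext; carving out the middle [b, c) leaves:
 *
 *	before:  PREV  |oooooooooooo|        a..........d
 *	after:   PREV  |oooo|                a...b
 *	         new        |nnnn|               b...c
 *	         r[1]            |oooo|              c...d
 *
 * r[1].br_startblock is new->br_startblock + new->br_blockcount because
 * all three records are carved from the same physical extent.
 */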
2460 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2461 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2462 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
2463 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2464 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2465 case BMAP_LEFT_CONTIG:
2466 case BMAP_RIGHT_CONTIG:
2468 * These cases are all impossible.
2470 ASSERT(0);
2471 }
2473 /* update reverse mappings */
2474 xfs_rmap_convert_extent(mp, tp, ip, whichfork, new);
2476 /* convert to a btree if necessary */
2477 if (xfs_bmap_needs_btree(ip, whichfork)) {
2478 int tmp_logflags; /* partial log flag return val */
2480 ASSERT(cur == NULL);
2481 error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
2482 &tmp_logflags, whichfork);
2483 *logflagsp |= tmp_logflags;
2484 if (error)
2485 goto done;
2486 }
2488 /* clear out the allocated field, done with it now in any case. */
2489 if (cur) {
2490 cur->bc_private.b.allocated = 0;
2491 *curp = cur;
2492 }
2494 xfs_bmap_check_leaf_extents(*curp, ip, whichfork);
2495 done:
2496 *logflagsp |= rval;
2497 return error;
2498 }
2504 * Convert a hole to a delayed allocation.
2507 xfs_bmap_add_extent_hole_delay(
2508 xfs_inode_t *ip, /* incore inode pointer */
2509 int whichfork,
2510 struct xfs_iext_cursor *icur,
2511 xfs_bmbt_irec_t *new) /* new data to add to file extents */
2513 struct xfs_ifork *ifp; /* inode fork pointer */
2514 xfs_bmbt_irec_t left; /* left neighbor extent entry */
2515 xfs_filblks_t newlen=0; /* new indirect size */
2516 xfs_filblks_t oldlen=0; /* old indirect size */
2517 xfs_bmbt_irec_t right; /* right neighbor extent entry */
2518 int state = xfs_bmap_fork_to_state(whichfork);
2519 xfs_filblks_t temp; /* temp for indirect calculations */
2521 ifp = XFS_IFORK_PTR(ip, whichfork);
2522 ASSERT(isnullstartblock(new->br_startblock));
2525 * Check and set flags if this segment has a left neighbor
2527 if (xfs_iext_peek_prev_extent(ifp, icur, &left)) {
2528 state |= BMAP_LEFT_VALID;
2529 if (isnullstartblock(left.br_startblock))
2530 state |= BMAP_LEFT_DELAY;
2534 * Check and set flags if the current (right) segment exists.
2535 * If it doesn't exist, we're converting the hole at end-of-file.
2537 if (xfs_iext_get_extent(ifp, icur, &right)) {
2538 state |= BMAP_RIGHT_VALID;
2539 if (isnullstartblock(right.br_startblock))
2540 state |= BMAP_RIGHT_DELAY;
2544 * Set contiguity flags on the left and right neighbors.
2545 * Don't let extents get too large, even if the pieces are contiguous.
2547 if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) &&
2548 left.br_startoff + left.br_blockcount == new->br_startoff &&
2549 left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
2550 state |= BMAP_LEFT_CONTIG;
2552 if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) &&
2553 new->br_startoff + new->br_blockcount == right.br_startoff &&
2554 new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
2555 (!(state & BMAP_LEFT_CONTIG) ||
2556 (left.br_blockcount + new->br_blockcount +
2557 right.br_blockcount <= MAXEXTLEN)))
2558 state |= BMAP_RIGHT_CONTIG;
2561 * Switch out based on the contiguity flags.
2563 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
2564 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2566 * New allocation is contiguous with delayed allocations
2567 * on the left and on the right.
2568 * Merge all three into a single extent record.
2570 temp = left.br_blockcount + new->br_blockcount +
2571 right.br_blockcount;
2573 oldlen = startblockval(left.br_startblock) +
2574 startblockval(new->br_startblock) +
2575 startblockval(right.br_startblock);
2576 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2577 oldlen);
2578 left.br_startblock = nullstartblock(newlen);
2579 left.br_blockcount = temp;
2581 xfs_iext_remove(ip, icur, state);
2582 xfs_iext_prev(ifp, icur);
2583 xfs_iext_update_extent(ip, state, icur, &left);
2584 break;
2586 case BMAP_LEFT_CONTIG:
2588 * New allocation is contiguous with a delayed allocation
2589 * on the left.
2590 * Merge the new allocation with the left neighbor.
2592 temp = left.br_blockcount + new->br_blockcount;
2594 oldlen = startblockval(left.br_startblock) +
2595 startblockval(new->br_startblock);
2596 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2597 oldlen);
2598 left.br_blockcount = temp;
2599 left.br_startblock = nullstartblock(newlen);
2601 xfs_iext_prev(ifp, icur);
2602 xfs_iext_update_extent(ip, state, icur, &left);
2603 break;
2605 case BMAP_RIGHT_CONTIG:
2607 * New allocation is contiguous with a delayed allocation
2608 * on the right.
2609 * Merge the new allocation with the right neighbor.
2611 temp = new->br_blockcount + right.br_blockcount;
2612 oldlen = startblockval(new->br_startblock) +
2613 startblockval(right.br_startblock);
2614 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2615 oldlen);
2616 right.br_startoff = new->br_startoff;
2617 right.br_startblock = nullstartblock(newlen);
2618 right.br_blockcount = temp;
2619 xfs_iext_update_extent(ip, state, icur, &right);
2620 break;
2622 default:
2624 * New allocation is not contiguous with another
2625 * delayed allocation.
2626 * Insert a new entry.
2628 oldlen = newlen = 0;
2629 xfs_iext_insert(ip, icur, new, state);
2630 break;
2631 }
2632 if (oldlen != newlen) {
2633 ASSERT(oldlen > newlen);
2634 xfs_mod_fdblocks(ip->i_mount, (int64_t)(oldlen - newlen),
2635 false);
2637 * Nothing to do for disk quota accounting here.
2639 xfs_mod_delalloc(ip->i_mount, (int64_t)newlen - oldlen);
2640 }
2641 }
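/*
 * Editor's note: an illustrative, self-contained sketch (not kernel
 * code) of the reservation trimming above. When delalloc extents merge,
 * the worst-case indirect-block reservation is recomputed for the
 * combined length and capped at the sum of the old reservations, so the
 * reservation never grows; the surplus is handed back to the free pool.
 * worst_indlen() is a hypothetical stand-in for xfs_bmap_worst_indlen().
 */
#include <stdint.h>

static uint64_t merge_indlen(uint64_t left_res, uint64_t right_res,
			     uint64_t combined_len,
			     uint64_t (*worst_indlen)(uint64_t))
{
	uint64_t oldlen = left_res + right_res;
	uint64_t newlen = worst_indlen(combined_len);

	return newlen < oldlen ? newlen : oldlen;	/* never grow */
}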
2644 * Convert a hole to a real allocation.
2646 STATIC int /* error */
2647 xfs_bmap_add_extent_hole_real(
2648 struct xfs_trans *tp,
2649 struct xfs_inode *ip,
2650 int whichfork,
2651 struct xfs_iext_cursor *icur,
2652 struct xfs_btree_cur **curp,
2653 struct xfs_bmbt_irec *new,
2654 int *logflagsp,
2655 int flags)
2657 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
2658 struct xfs_mount *mp = ip->i_mount;
2659 struct xfs_btree_cur *cur = *curp;
2660 int error; /* error return value */
2661 int i; /* temp state */
2662 xfs_bmbt_irec_t left; /* left neighbor extent entry */
2663 xfs_bmbt_irec_t right; /* right neighbor extent entry */
2664 int rval=0; /* return value (logging flags) */
2665 int state = xfs_bmap_fork_to_state(whichfork);
2666 struct xfs_bmbt_irec old;
2668 ASSERT(!isnullstartblock(new->br_startblock));
2669 ASSERT(!cur || !(cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));
2671 XFS_STATS_INC(mp, xs_add_exlist);
2674 * Check and set flags if this segment has a left neighbor.
2676 if (xfs_iext_peek_prev_extent(ifp, icur, &left)) {
2677 state |= BMAP_LEFT_VALID;
2678 if (isnullstartblock(left.br_startblock))
2679 state |= BMAP_LEFT_DELAY;
2683 * Check and set flags if this segment has a current value.
2684 * Not true if we're inserting into the "hole" at eof.
2686 if (xfs_iext_get_extent(ifp, icur, &right)) {
2687 state |= BMAP_RIGHT_VALID;
2688 if (isnullstartblock(right.br_startblock))
2689 state |= BMAP_RIGHT_DELAY;
2693 * We're inserting a real allocation between "left" and "right".
2694 * Set the contiguity flags. Don't let extents get too large.
2696 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
2697 left.br_startoff + left.br_blockcount == new->br_startoff &&
2698 left.br_startblock + left.br_blockcount == new->br_startblock &&
2699 left.br_state == new->br_state &&
2700 left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
2701 state |= BMAP_LEFT_CONTIG;
2703 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
2704 new->br_startoff + new->br_blockcount == right.br_startoff &&
2705 new->br_startblock + new->br_blockcount == right.br_startblock &&
2706 new->br_state == right.br_state &&
2707 new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
2708 (!(state & BMAP_LEFT_CONTIG) ||
2709 left.br_blockcount + new->br_blockcount +
2710 right.br_blockcount <= MAXEXTLEN))
2711 state |= BMAP_RIGHT_CONTIG;
2715 * Select which case we're in here, and implement it.
2717 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
2718 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2720 * New allocation is contiguous with real allocations on the
2721 * left and on the right.
2722 * Merge all three into a single extent record.
2724 left.br_blockcount += new->br_blockcount + right.br_blockcount;
2726 xfs_iext_remove(ip, icur, state);
2727 xfs_iext_prev(ifp, icur);
2728 xfs_iext_update_extent(ip, state, icur, &left);
2730 XFS_IFORK_NEXT_SET(ip, whichfork,
2731 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
2732 if (cur == NULL) {
2733 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
2734 } else {
2735 rval = XFS_ILOG_CORE;
2736 error = xfs_bmbt_lookup_eq(cur, &right, &i);
2737 if (error)
2738 goto done;
2739 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2740 error = xfs_btree_delete(cur, &i);
2741 if (error)
2742 goto done;
2743 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2744 error = xfs_btree_decrement(cur, 0, &i);
2745 if (error)
2746 goto done;
2747 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2748 error = xfs_bmbt_update(cur, &left);
2749 if (error)
2750 goto done;
2751 }
2752 break;
2754 case BMAP_LEFT_CONTIG:
2756 * New allocation is contiguous with a real allocation
2758 * Merge the new allocation with the left neighbor.
2761 left.br_blockcount += new->br_blockcount;
2763 xfs_iext_prev(ifp, icur);
2764 xfs_iext_update_extent(ip, state, icur, &left);
2766 if (cur == NULL) {
2767 rval = xfs_ilog_fext(whichfork);
2768 } else {
2769 rval = 0;
2770 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2771 if (error)
2772 goto done;
2773 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2774 error = xfs_bmbt_update(cur, &left);
2775 if (error)
2776 goto done;
2777 }
2778 break;
2780 case BMAP_RIGHT_CONTIG:
2782 * New allocation is contiguous with a real allocation
2783 * on the right.
2784 * Merge the new allocation with the right neighbor.
2786 old = right;
2788 right.br_startoff = new->br_startoff;
2789 right.br_startblock = new->br_startblock;
2790 right.br_blockcount += new->br_blockcount;
2791 xfs_iext_update_extent(ip, state, icur, &right);
2793 if (cur == NULL) {
2794 rval = xfs_ilog_fext(whichfork);
2795 } else {
2796 rval = 0;
2797 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2798 if (error)
2799 goto done;
2800 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2801 error = xfs_bmbt_update(cur, &right);
2802 if (error)
2803 goto done;
2804 }
2805 break;
2807 default:
2809 * New allocation is not contiguous with another
2810 * real allocation.
2811 * Insert a new entry.
2813 xfs_iext_insert(ip, icur, new, state);
2814 XFS_IFORK_NEXT_SET(ip, whichfork,
2815 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
2816 if (cur == NULL) {
2817 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
2818 } else {
2819 rval = XFS_ILOG_CORE;
2820 error = xfs_bmbt_lookup_eq(cur, new, &i);
2821 if (error)
2822 goto done;
2823 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
2824 error = xfs_btree_insert(cur, &i);
2825 if (error)
2826 goto done;
2827 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2828 }
2829 break;
2832 /* add reverse mapping unless caller opted out */
2833 if (!(flags & XFS_BMAPI_NORMAP))
2834 xfs_rmap_map_extent(tp, ip, whichfork, new);
2836 /* convert to a btree if necessary */
2837 if (xfs_bmap_needs_btree(ip, whichfork)) {
2838 int tmp_logflags; /* partial log flag return val */
2840 ASSERT(cur == NULL);
2841 error = xfs_bmap_extents_to_btree(tp, ip, curp, 0,
2842 &tmp_logflags, whichfork);
2843 *logflagsp |= tmp_logflags;
2844 if (error)
2845 goto done;
2846 }
2849 /* clear out the allocated field, done with it now in any case. */
2850 if (cur)
2851 cur->bc_private.b.allocated = 0;
2853 xfs_bmap_check_leaf_extents(cur, ip, whichfork);
2854 done:
2855 *logflagsp |= rval;
2856 return error;
2857 }
2860 * Functions used in the extent read, allocate and remove paths
2864 * Adjust the size of the new extent based on di_extsize and rt extsize.
2867 xfs_bmap_extsize_align(
2868 xfs_mount_t *mp, /* file system mount structure */
2869 xfs_bmbt_irec_t *gotp, /* next extent pointer */
2870 xfs_bmbt_irec_t *prevp, /* previous extent pointer */
2871 xfs_extlen_t extsz, /* align to this extent size */
2872 int rt, /* is this a realtime inode? */
2873 int eof, /* is extent at end-of-file? */
2874 int delay, /* creating delalloc extent? */
2875 int convert, /* overwriting unwritten extent? */
2876 xfs_fileoff_t *offp, /* in/out: aligned offset */
2877 xfs_extlen_t *lenp) /* in/out: aligned length */
2879 xfs_fileoff_t orig_off; /* original offset */
2880 xfs_extlen_t orig_alen; /* original length */
2881 xfs_fileoff_t orig_end; /* original off+len */
2882 xfs_fileoff_t nexto; /* next file offset */
2883 xfs_fileoff_t prevo; /* previous file offset */
2884 xfs_fileoff_t align_off; /* temp for offset */
2885 xfs_extlen_t align_alen; /* temp for length */
2886 xfs_extlen_t temp; /* temp for calculations */
2891 orig_off = align_off = *offp;
2892 orig_alen = align_alen = *lenp;
2893 orig_end = orig_off + orig_alen;
2896 * If this request overlaps an existing extent, then don't
2897 * attempt to perform any additional alignment.
2899 if (!delay && !eof &&
2900 (orig_off >= gotp->br_startoff) &&
2901 (orig_end <= gotp->br_startoff + gotp->br_blockcount)) {
2906 * If the file offset is unaligned vs. the extent size
2907 * we need to align it. This will be possible unless
2908 * the file was previously written with a kernel that didn't
2909 * perform this alignment, or if a truncate shot us in the
2910 * foot.
2912 div_u64_rem(orig_off, extsz, &temp);
2913 if (temp) {
2914 align_alen += temp;
2915 align_off -= temp;
2916 }
2918 /* Same adjustment for the end of the requested area. */
2919 temp = (align_alen % extsz);
2920 if (temp)
2921 align_alen += extsz - temp;
2924 * For large extent hint sizes, the aligned extent might be larger than
2925 * MAXEXTLEN. In that case, reduce the size by an extsz so that it pulls
2926 * the length back under MAXEXTLEN. The outer allocation loops handle
2927 * short allocation just fine, so it is safe to do this. We only want to
2928 * do it when we are forced to, though, because it means more allocation
2929 * operations are required.
2931 while (align_alen > MAXEXTLEN)
2932 align_alen -= extsz;
2933 ASSERT(align_alen <= MAXEXTLEN);
2936 * If the previous block overlaps with this proposed allocation
2937 * then move the start forward without adjusting the length.
2939 if (prevp->br_startoff != NULLFILEOFF) {
2940 if (prevp->br_startblock == HOLESTARTBLOCK)
2941 prevo = prevp->br_startoff;
2943 prevo = prevp->br_startoff + prevp->br_blockcount;
2946 if (align_off != orig_off && align_off < prevo)
2947 align_off = prevo;
2949 * If the next block overlaps with this proposed allocation
2950 * then move the start back without adjusting the length,
2951 * but not before offset 0.
2952 * This may of course make the start overlap previous block,
2953 * and if we hit the offset 0 limit then the next block
2954 * can still overlap too.
2956 if (!eof && gotp->br_startoff != NULLFILEOFF) {
2957 if ((delay && gotp->br_startblock == HOLESTARTBLOCK) ||
2958 (!delay && gotp->br_startblock == DELAYSTARTBLOCK))
2959 nexto = gotp->br_startoff + gotp->br_blockcount;
2960 else
2961 nexto = gotp->br_startoff;
2962 } else
2963 nexto = NULLFILEOFF;
2964 if (!delay &&
2965 align_off + align_alen != orig_end &&
2966 align_off + align_alen > nexto)
2967 align_off = nexto > align_alen ? nexto - align_alen : 0;
2969 * If we're now overlapping the next or previous extent that
2970 * means we can't fit an extsz piece in this hole. Just move
2971 * the start forward to the first valid spot and set
2972 * the length so we hit the end.
2974 if (align_off != orig_off && align_off < prevo)
2975 align_off = prevo;
2976 if (align_off + align_alen != orig_end &&
2977 align_off + align_alen > nexto &&
2978 nexto != NULLFILEOFF) {
2979 ASSERT(nexto > prevo);
2980 align_alen = nexto - align_off;
2981 }
2984 * If realtime, and the result isn't a multiple of the realtime
2985 * extent size we need to remove blocks until it is.
2987 if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) {
2989 * We're not covering the original request, or
2990 * we won't be able to once we fix the length.
2992 if (orig_off < align_off ||
2993 orig_end > align_off + align_alen ||
2994 align_alen - temp < orig_alen)
2995 return -EINVAL;
2997 * Try to fix it by moving the start up.
2999 if (align_off + temp <= orig_off) {
3000 align_alen -= temp;
3001 align_off += temp;
3002 }
3004 * Try to fix it by moving the end in.
3006 else if (align_off + align_alen - temp >= orig_end)
3007 align_alen -= temp;
3009 * Set the start to the minimum then trim the length.
3011 else {
3012 align_alen -= orig_off - align_off;
3013 align_off = orig_off;
3014 align_alen -= align_alen % mp->m_sb.sb_rextsize;
3015 }
3017 * Result doesn't cover the request, fail it.
3019 if (orig_off < align_off || orig_end > align_off + align_alen)
3020 return -EINVAL;
3021 } else {
3022 ASSERT(orig_off >= align_off);
3023 /* see MAXEXTLEN handling above */
3024 ASSERT(orig_end <= align_off + align_alen ||
3025 align_alen + extsz > MAXEXTLEN);
3029 if (!eof && gotp->br_startoff != NULLFILEOFF)
3030 ASSERT(align_off + align_alen <= gotp->br_startoff);
3031 if (prevp->br_startoff != NULLFILEOFF)
3032 ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount);
3034 *offp = align_off;
3035 *lenp = align_alen;
3036 return 0;
3037 }
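/*
 * Editor's note: the core alignment step above in isolation, as a
 * self-contained sketch with plain modulo arithmetic (the kernel uses
 * div_u64_rem() because these are 64-bit quantities). The start is
 * rounded down to an extsz boundary and the length rounded up, so the
 * aligned request still covers the original range. E.g. off 5, len 3,
 * extsz 4 becomes off 4, len 8.
 */
#include <stdint.h>

static void align_request(uint64_t *off, uint64_t *len, uint64_t extsz)
{
	uint64_t head = *off % extsz;	/* misalignment at the start */
	uint64_t tail;

	*off -= head;			/* move the start down... */
	*len += head;			/* ...and grow the length to match */
	tail = *len % extsz;
	if (tail)
		*len += extsz - tail;	/* round the end up */
}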
3040 #define XFS_ALLOC_GAP_UNITS 4
3044 struct xfs_bmalloca *ap) /* bmap alloc argument struct */
3046 xfs_fsblock_t adjust; /* adjustment to block numbers */
3047 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */
3048 xfs_mount_t *mp; /* mount point structure */
3049 int nullfb; /* true if ap->firstblock isn't set */
3050 int rt; /* true if inode is realtime */
3052 #define ISVALID(x,y) \
3053 (rt ? \
3054 (x) < mp->m_sb.sb_rblocks : \
3055 XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \
3056 XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \
3057 XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks)
3059 mp = ap->ip->i_mount;
3060 nullfb = ap->tp->t_firstblock == NULLFSBLOCK;
3061 rt = XFS_IS_REALTIME_INODE(ap->ip) &&
3062 xfs_alloc_is_userdata(ap->datatype);
3063 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp,
3064 ap->tp->t_firstblock);
3066 * If allocating at eof, and there's a previous real block,
3067 * try to use its last block as our starting point.
3069 if (ap->eof && ap->prev.br_startoff != NULLFILEOFF &&
3070 !isnullstartblock(ap->prev.br_startblock) &&
3071 ISVALID(ap->prev.br_startblock + ap->prev.br_blockcount,
3072 ap->prev.br_startblock)) {
3073 ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount;
3075 * Adjust for the gap between prevp and us.
3077 adjust = ap->offset -
3078 (ap->prev.br_startoff + ap->prev.br_blockcount);
3079 if (adjust &&
3080 ISVALID(ap->blkno + adjust, ap->prev.br_startblock))
3081 ap->blkno += adjust;
3084 * If not at eof, then compare the two neighbor blocks.
3085 * Figure out whether either one gives us a good starting point,
3086 * and pick the better one.
3088 else if (!ap->eof) {
3089 xfs_fsblock_t gotbno; /* right side block number */
3090 xfs_fsblock_t gotdiff=0; /* right side difference */
3091 xfs_fsblock_t prevbno; /* left side block number */
3092 xfs_fsblock_t prevdiff=0; /* left side difference */
3095 * If there's a previous (left) block, select a requested
3096 * start block based on it.
3098 if (ap->prev.br_startoff != NULLFILEOFF &&
3099 !isnullstartblock(ap->prev.br_startblock) &&
3100 (prevbno = ap->prev.br_startblock +
3101 ap->prev.br_blockcount) &&
3102 ISVALID(prevbno, ap->prev.br_startblock)) {
3104 * Calculate gap to end of previous block.
3106 adjust = prevdiff = ap->offset -
3107 (ap->prev.br_startoff +
3108 ap->prev.br_blockcount);
3110 * Figure the startblock based on the previous block's
3111 * end and the gap size.
3113 * If the gap is large relative to the piece we're
3114 * allocating, or using it gives us an invalid block
3115 * number, then just use the end of the previous block.
3117 if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
3118 ISVALID(prevbno + prevdiff,
3119 ap->prev.br_startblock))
3120 prevbno += adjust;
3121 else
3122 prevdiff += adjust;
3124 * If the firstblock forbids it, can't use it,
3127 if (!rt && !nullfb &&
3128 XFS_FSB_TO_AGNO(mp, prevbno) != fb_agno)
3129 prevbno = NULLFSBLOCK;
3132 * No previous block or can't follow it, just default.
3135 prevbno = NULLFSBLOCK;
3137 * If there's a following (right) block, select a requested
3138 * start block based on it.
3140 if (!isnullstartblock(ap->got.br_startblock)) {
3142 * Calculate gap to start of next block.
3144 adjust = gotdiff = ap->got.br_startoff - ap->offset;
3146 * Figure the startblock based on the next block's
3147 * start and the gap size.
3149 gotbno = ap->got.br_startblock;
3152 * If the gap is large relative to the piece we're
3153 * allocating, or using it gives us an invalid block
3154 * number, then just use the start of the next block
3155 * offset by our length.
3157 if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
3158 ISVALID(gotbno - gotdiff, gotbno))
3159 gotbno -= adjust;
3160 else if (ISVALID(gotbno - ap->length, gotbno)) {
3161 gotbno -= ap->length;
3162 gotdiff += adjust - ap->length;
3163 } else
3164 gotdiff += adjust;
3166 * If the firstblock forbids it, can't use it,
3169 if (!rt && !nullfb &&
3170 XFS_FSB_TO_AGNO(mp, gotbno) != fb_agno)
3171 gotbno = NULLFSBLOCK;
3174 * No next block, just default.
3177 gotbno = NULLFSBLOCK;
3179 * If both valid, pick the better one, else the only good
3180 * one, else ap->blkno is already set (to 0 or the inode block).
3182 if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK)
3183 ap->blkno = prevdiff <= gotdiff ? prevbno : gotbno;
3184 else if (prevbno != NULLFSBLOCK)
3185 ap->blkno = prevbno;
3186 else if (gotbno != NULLFSBLOCK)
3187 ap->blkno = gotbno;
3188 }
3189 #undef ISVALID
3190 }
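/*
 * Editor's note: the final candidate selection above, sketched with
 * hypothetical names (NULLBLK stands in for NULLFSBLOCK). Whichever
 * neighbour-derived start block sits closer to the ideal position wins;
 * with no valid candidate the caller's preset block is kept.
 */
#include <stdint.h>

#define NULLBLK ((uint64_t)-1)

static uint64_t pick_adjacent(uint64_t prevbno, uint64_t prevdiff,
			      uint64_t gotbno, uint64_t gotdiff,
			      uint64_t preset)
{
	if (prevbno != NULLBLK && gotbno != NULLBLK)
		return prevdiff <= gotdiff ? prevbno : gotbno;
	if (prevbno != NULLBLK)
		return prevbno;
	if (gotbno != NULLBLK)
		return gotbno;
	return preset;		/* ap->blkno already holds the default */
}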
3193 xfs_bmap_longest_free_extent(
3194 struct xfs_trans *tp,
3199 struct xfs_mount *mp = tp->t_mountp;
3200 struct xfs_perag *pag;
3201 xfs_extlen_t longest;
3204 pag = xfs_perag_get(mp, ag);
3205 if (!pag->pagf_init) {
3206 error = xfs_alloc_pagf_init(mp, tp, ag, XFS_ALLOC_FLAG_TRYLOCK);
3207 if (error)
3208 goto out;
3210 if (!pag->pagf_init) {
3211 *notinit = 1;
3212 goto out;
3213 }
3214 }
3216 longest = xfs_alloc_longest_free_extent(pag,
3217 xfs_alloc_min_freelist(mp, pag),
3218 xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE));
3219 if (*blen < longest)
3220 *blen = longest;
3222 out:
3223 xfs_perag_put(pag);
3224 return error;
3225 }
3228 xfs_bmap_select_minlen(
3229 struct xfs_bmalloca *ap,
3230 struct xfs_alloc_arg *args,
3234 if (notinit || *blen < ap->minlen) {
3236 * Since we did a BUF_TRYLOCK above, it is possible that
3237 * there is space for this request.
3239 args->minlen = ap->minlen;
3240 } else if (*blen < args->maxlen) {
3242 * If the best seen length is less than the request length,
3243 * use the best as the minimum.
3245 args->minlen = *blen;
3248 * Otherwise we've seen an extent as big as maxlen, use that
3249 * as the minimum.
3251 args->minlen = args->maxlen;
3252 }
3253 }
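/*
 * Editor's note: the three-way minlen policy above in isolation, as a
 * hedged sketch with hypothetical names. "best" is the longest free
 * extent seen while scanning the AGs with trylock semantics.
 */
#include <stdint.h>

static uint64_t select_minlen(uint64_t minlen, uint64_t maxlen,
			      uint64_t best, int notinit)
{
	if (notinit || best < minlen)
		return minlen;	/* AGs were skipped; stay optimistic */
	if (best < maxlen)
		return best;	/* can't get maxlen; take the best seen */
	return maxlen;		/* an extent of maxlen size exists */
}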
3256 xfs_bmap_btalloc_nullfb(
3257 struct xfs_bmalloca *ap,
3258 struct xfs_alloc_arg *args,
3261 struct xfs_mount *mp = ap->ip->i_mount;
3262 xfs_agnumber_t ag, startag;
3266 args->type = XFS_ALLOCTYPE_START_BNO;
3267 args->total = ap->total;
3269 startag = ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
3270 if (startag == NULLAGNUMBER)
3271 startag = ag = 0;
3273 while (*blen < args->maxlen) {
3274 error = xfs_bmap_longest_free_extent(args->tp, ag, blen,
3275 &notinit);
3276 if (error)
3277 return error;
3279 if (++ag == mp->m_sb.sb_agcount)
3280 ag = 0;
3281 if (ag == startag)
3282 break;
3283 }
3285 xfs_bmap_select_minlen(ap, args, blen, notinit);
3286 return 0;
3290 xfs_bmap_btalloc_filestreams(
3291 struct xfs_bmalloca *ap,
3292 struct xfs_alloc_arg *args,
3295 struct xfs_mount *mp = ap->ip->i_mount;
3300 args->type = XFS_ALLOCTYPE_NEAR_BNO;
3301 args->total = ap->total;
3303 ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
3304 if (ag == NULLAGNUMBER)
3305 ag = 0;
3307 error = xfs_bmap_longest_free_extent(args->tp, ag, blen, &notinit);
3308 if (error)
3309 return error;
3311 if (*blen < args->maxlen) {
3312 error = xfs_filestream_new_ag(ap, &ag);
3313 if (error)
3314 return error;
3316 error = xfs_bmap_longest_free_extent(args->tp, ag, blen,
3317 &notinit);
3318 if (error)
3319 return error;
3321 }
3323 xfs_bmap_select_minlen(ap, args, blen, notinit);
3326 * Set the failure fallback case to look in the selected AG as stream
3327 * may have moved.
3329 ap->blkno = args->fsbno = XFS_AGB_TO_FSB(mp, ag, 0);
3330 return 0;
3331 }
3333 /* Update all inode and quota accounting for the allocation we just did. */
3335 xfs_bmap_btalloc_accounting(
3336 struct xfs_bmalloca *ap,
3337 struct xfs_alloc_arg *args)
3339 if (ap->flags & XFS_BMAPI_COWFORK) {
3341 * COW fork blocks are in-core only and thus are treated as
3342 * in-core quota reservation (like delalloc blocks) even when
3343 * converted to real blocks. The quota reservation is not
3344 * accounted to disk until blocks are remapped to the data
3345 * fork. So if these blocks were previously delalloc, we
3346 * already have quota reservation and there's nothing to do
3347 * yet.
3349 if (ap->wasdel) {
3350 xfs_mod_delalloc(ap->ip->i_mount, -(int64_t)args->len);
3351 return;
3352 }
3355 * Otherwise, we've allocated blocks in a hole. The transaction
3356 * has acquired in-core quota reservation for this extent.
3357 * Rather than account these as real blocks, however, we reduce
3358 * the transaction quota reservation based on the allocation.
3359 * This essentially transfers the transaction quota reservation
3360 * to that of a delalloc extent.
3362 ap->ip->i_delayed_blks += args->len;
3363 xfs_trans_mod_dquot_byino(ap->tp, ap->ip, XFS_TRANS_DQ_RES_BLKS,
3364 -(long)args->len);
3365 return;
3366 }
3368 /* data/attr fork only */
3369 ap->ip->i_d.di_nblocks += args->len;
3370 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
3371 if (ap->wasdel) {
3372 ap->ip->i_delayed_blks -= args->len;
3373 xfs_mod_delalloc(ap->ip->i_mount, -(int64_t)args->len);
3374 }
3375 xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
3376 ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT : XFS_TRANS_DQ_BCOUNT,
3377 args->len);
3382 struct xfs_bmalloca *ap) /* bmap alloc argument struct */
3384 xfs_mount_t *mp; /* mount point structure */
3385 xfs_alloctype_t atype = 0; /* type for allocation routines */
3386 xfs_extlen_t align = 0; /* minimum allocation alignment */
3387 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */
3389 xfs_alloc_arg_t args;
3390 xfs_fileoff_t orig_offset;
3391 xfs_extlen_t orig_length;
3393 xfs_extlen_t nextminlen = 0;
3394 int nullfb; /* true if ap->firstblock isn't set */
3401 orig_offset = ap->offset;
3402 orig_length = ap->length;
3404 mp = ap->ip->i_mount;
3406 /* stripe alignment for allocation is determined by mount parameters */
3407 stripe_align = 0;
3408 if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
3409 stripe_align = mp->m_swidth;
3410 else if (mp->m_dalign)
3411 stripe_align = mp->m_dalign;
3413 if (ap->flags & XFS_BMAPI_COWFORK)
3414 align = xfs_get_cowextsz_hint(ap->ip);
3415 else if (xfs_alloc_is_userdata(ap->datatype))
3416 align = xfs_get_extsz_hint(ap->ip);
3417 if (align) {
3418 error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
3419 align, 0, ap->eof, 0, ap->conv,
3420 &ap->offset, &ap->length);
3421 ASSERT(!error);
3422 ASSERT(ap->length);
3423 }
3426 nullfb = ap->tp->t_firstblock == NULLFSBLOCK;
3427 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp,
3428 ap->tp->t_firstblock);
3429 if (nullfb) {
3430 if (xfs_alloc_is_userdata(ap->datatype) &&
3431 xfs_inode_is_filestream(ap->ip)) {
3432 ag = xfs_filestream_lookup_ag(ap->ip);
3433 ag = (ag != NULLAGNUMBER) ? ag : 0;
3434 ap->blkno = XFS_AGB_TO_FSB(mp, ag, 0);
3435 } else {
3436 ap->blkno = XFS_INO_TO_FSB(mp, ap->ip->i_ino);
3437 }
3438 } else
3439 ap->blkno = ap->tp->t_firstblock;
3441 xfs_bmap_adjacent(ap);
3444 * If allowed, use ap->blkno; otherwise must use firstblock since
3445 * it's in the right allocation group.
3447 if (nullfb || XFS_FSB_TO_AGNO(mp, ap->blkno) == fb_agno)
3448 ;
3449 else
3450 ap->blkno = ap->tp->t_firstblock;
3452 * Normal allocation, done through xfs_alloc_vextent.
3454 tryagain = isaligned = 0;
3455 memset(&args, 0, sizeof(args));
3456 args.tp = ap->tp;
3457 args.mp = mp;
3458 args.fsbno = ap->blkno;
3459 args.oinfo = XFS_RMAP_OINFO_SKIP_UPDATE;
3461 /* Trim the allocation back to the maximum an AG can fit. */
3462 args.maxlen = min(ap->length, mp->m_ag_max_usable);
3463 blen = 0;
3464 if (nullfb) {
3466 * Search for an allocation group with a single extent large
3467 * enough for the request. If one isn't found, then adjust
3468 * the minimum allocation size to the largest space found.
3470 if (xfs_alloc_is_userdata(ap->datatype) &&
3471 xfs_inode_is_filestream(ap->ip))
3472 error = xfs_bmap_btalloc_filestreams(ap, &args, &blen);
3473 else
3474 error = xfs_bmap_btalloc_nullfb(ap, &args, &blen);
3475 if (error)
3476 return error;
3477 } else if (ap->tp->t_flags & XFS_TRANS_LOWMODE) {
3478 if (xfs_inode_is_filestream(ap->ip))
3479 args.type = XFS_ALLOCTYPE_FIRST_AG;
3480 else
3481 args.type = XFS_ALLOCTYPE_START_BNO;
3482 args.total = args.minlen = ap->minlen;
3483 } else {
3484 args.type = XFS_ALLOCTYPE_NEAR_BNO;
3485 args.total = ap->total;
3486 args.minlen = ap->minlen;
3487 }
3488 /* apply extent size hints if obtained earlier */
3489 if (align) {
3490 args.prod = align;
3491 div_u64_rem(ap->offset, args.prod, &args.mod);
3492 if (args.mod)
3493 args.mod = args.prod - args.mod;
3494 } else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) {
3495 args.prod = 1;
3496 args.mod = 0;
3497 } else {
3498 args.prod = PAGE_SIZE >> mp->m_sb.sb_blocklog;
3499 div_u64_rem(ap->offset, args.prod, &args.mod);
3500 if (args.mod)
3501 args.mod = args.prod - args.mod;
3502 }
3504 * If we are not low on available data blocks, and the underlying
3505 * logical volume manager is a stripe, and the file offset is zero then
3506 * try to allocate data blocks on stripe unit boundary. NOTE: ap->aeof
3507 * is only set if the allocation length is >= the stripe unit and the
3508 * allocation offset is at the end of file.
3510 if (!(ap->tp->t_flags & XFS_TRANS_LOWMODE) && ap->aeof) {
3511 if (!ap->offset) {
3512 args.alignment = stripe_align;
3513 atype = args.type;
3514 isaligned = 1;
3516 * Adjust minlen to try and preserve alignment if we
3517 * can't guarantee an aligned maxlen extent.
3519 if (blen > args.alignment &&
3520 blen <= args.maxlen + args.alignment)
3521 args.minlen = blen - args.alignment;
3522 args.minalignslop = 0;
3523 } else {
3525 * First try an exact bno allocation.
3526 * If it fails then do a near or start bno
3527 * allocation with alignment turned on.
3529 atype = args.type;
3530 tryagain = 1;
3531 args.type = XFS_ALLOCTYPE_THIS_BNO;
3532 args.alignment = 1;
3534 * Compute the minlen+alignment for the
3535 * next case. Set slop so that the value
3536 * of minlen+alignment+slop doesn't go up
3537 * between the calls.
3539 if (blen > stripe_align && blen <= args.maxlen)
3540 nextminlen = blen - stripe_align;
3541 else
3542 nextminlen = args.minlen;
3543 if (nextminlen + stripe_align > args.minlen + 1)
3544 args.minalignslop =
3545 nextminlen + stripe_align -
3546 args.minlen - 1;
3547 else
3548 args.minalignslop = 0;
3549 }
3550 } else {
3551 args.alignment = 1;
3552 args.minalignslop = 0;
3553 }
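/*
 * Editor's note: a worked example of the slop computation above, under
 * assumed values. With args.minlen = 8, stripe_align = 4 and
 * nextminlen = 10:
 *
 *	minalignslop = nextminlen + stripe_align - args.minlen - 1
 *		     = 10 + 4 - 8 - 1 = 5
 *
 * so minlen + alignment + slop is 8 + 1 + 5 = 14 on the exact-bno try
 * and 10 + 4 + 0 = 14 on the aligned retry: the total space the
 * allocator must keep available never goes up between the two calls.
 */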
3554 args.minleft = ap->minleft;
3555 args.wasdel = ap->wasdel;
3556 args.resv = XFS_AG_RESV_NONE;
3557 args.datatype = ap->datatype;
3558 if (ap->datatype & XFS_ALLOC_USERDATA_ZERO)
3559 args.zero = true;
3561 error = xfs_alloc_vextent(&args);
3562 if (error)
3563 return error;
3565 if (tryagain && args.fsbno == NULLFSBLOCK) {
3567 * Exact allocation failed. Now try with alignment
3568 * turned on.
3570 args.type = atype;
3571 args.fsbno = ap->blkno;
3572 args.alignment = stripe_align;
3573 args.minlen = nextminlen;
3574 args.minalignslop = 0;
3575 isaligned = 1;
3576 if ((error = xfs_alloc_vextent(&args)))
3577 return error;
3578 }
3579 if (isaligned && args.fsbno == NULLFSBLOCK) {
3581 * allocation failed, so turn off alignment and
3582 * try again.
3584 args.type = atype;
3585 args.fsbno = ap->blkno;
3586 args.alignment = 0;
3587 if ((error = xfs_alloc_vextent(&args)))
3588 return error;
3589 }
3590 if (args.fsbno == NULLFSBLOCK && nullfb &&
3591 args.minlen > ap->minlen) {
3592 args.minlen = ap->minlen;
3593 args.type = XFS_ALLOCTYPE_START_BNO;
3594 args.fsbno = ap->blkno;
3595 if ((error = xfs_alloc_vextent(&args)))
3596 return error;
3597 }
3598 if (args.fsbno == NULLFSBLOCK && nullfb) {
3599 args.fsbno = 0;
3600 args.type = XFS_ALLOCTYPE_FIRST_AG;
3601 args.total = ap->minlen;
3602 if ((error = xfs_alloc_vextent(&args)))
3603 return error;
3604 ap->tp->t_flags |= XFS_TRANS_LOWMODE;
3606 if (args.fsbno != NULLFSBLOCK) {
3608 * check the allocation happened at the same or higher AG than
3609 * the first block that was allocated.
3611 ASSERT(ap->tp->t_firstblock == NULLFSBLOCK ||
3612 XFS_FSB_TO_AGNO(mp, ap->tp->t_firstblock) <=
3613 XFS_FSB_TO_AGNO(mp, args.fsbno));
3615 ap->blkno = args.fsbno;
3616 if (ap->tp->t_firstblock == NULLFSBLOCK)
3617 ap->tp->t_firstblock = args.fsbno;
3618 ASSERT(nullfb || fb_agno <= args.agno);
3619 ap->length = args.len;
3621 * If the extent size hint is active, we tried to round the
3622 * caller's allocation request offset down to extsz and the
3623 * length up to another extsz boundary. If we found a free
3624 * extent we mapped it in starting at this new offset. If the
3625 * newly mapped space isn't long enough to cover any of the
3626 * range of offsets that was originally requested, move the
3627 * mapping up so that we can fill as much of the caller's
3628 * original request as possible. Free space is apparently
3629 * very fragmented so we're unlikely to be able to satisfy the
3632 if (ap->length <= orig_length)
3633 ap->offset = orig_offset;
3634 else if (ap->offset + ap->length < orig_offset + orig_length)
3635 ap->offset = orig_offset + orig_length - ap->length;
3636 xfs_bmap_btalloc_accounting(ap, &args);
3637 } else {
3638 ap->blkno = NULLFSBLOCK;
3639 ap->length = 0;
3640 }
3641 return 0;
3642 }
3645 * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
3646 * It figures out where to ask the underlying allocator to put the new extent.
3650 struct xfs_bmalloca *ap) /* bmap alloc argument struct */
3652 if (XFS_IS_REALTIME_INODE(ap->ip) &&
3653 xfs_alloc_is_userdata(ap->datatype))
3654 return xfs_bmap_rtalloc(ap);
3655 return xfs_bmap_btalloc(ap);
3658 /* Trim extent to fit a logical block range. */
3661 struct xfs_bmbt_irec *irec,
3665 xfs_fileoff_t distance;
3666 xfs_fileoff_t end = bno + len;
3668 if (irec->br_startoff + irec->br_blockcount <= bno ||
3669 irec->br_startoff >= end) {
3670 irec->br_blockcount = 0;
3671 return;
3672 }
3674 if (irec->br_startoff < bno) {
3675 distance = bno - irec->br_startoff;
3676 if (isnullstartblock(irec->br_startblock))
3677 irec->br_startblock = DELAYSTARTBLOCK;
3678 if (irec->br_startblock != DELAYSTARTBLOCK &&
3679 irec->br_startblock != HOLESTARTBLOCK)
3680 irec->br_startblock += distance;
3681 irec->br_startoff += distance;
3682 irec->br_blockcount -= distance;
3685 if (end < irec->br_startoff + irec->br_blockcount) {
3686 distance = irec->br_startoff + irec->br_blockcount - end;
3687 irec->br_blockcount -= distance;
3688 }
3689 }
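/*
 * Editor's note: a self-contained sketch of the trim above with a
 * hypothetical record type. An extent covering [10, 30) trimmed to the
 * range [15, 20) keeps the 5 overlapping blocks, and startblock advances
 * by the amount cut from the front.
 */
#include <stdint.h>

struct sketch_ext { uint64_t startoff, startblock, blockcount; };

static void trim_to_range(struct sketch_ext *e, uint64_t bno, uint64_t len)
{
	uint64_t end = bno + len, d;

	if (e->startoff + e->blockcount <= bno || e->startoff >= end) {
		e->blockcount = 0;		/* no overlap at all */
		return;
	}
	if (e->startoff < bno) {		/* cut the front */
		d = bno - e->startoff;
		e->startblock += d;
		e->startoff += d;
		e->blockcount -= d;
	}
	if (end < e->startoff + e->blockcount)	/* cut the tail */
		e->blockcount = end - e->startoff;
}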
3692 * Trim the returned map to the required bounds
3696 struct xfs_bmbt_irec *mval,
3697 struct xfs_bmbt_irec *got,
3705 if ((flags & XFS_BMAPI_ENTIRE) ||
3706 got->br_startoff + got->br_blockcount <= obno) {
3707 *mval = *got;
3708 if (isnullstartblock(got->br_startblock))
3709 mval->br_startblock = DELAYSTARTBLOCK;
3710 return;
3711 }
3715 ASSERT((*bno >= obno) || (n == 0));
3717 mval->br_startoff = *bno;
3718 if (isnullstartblock(got->br_startblock))
3719 mval->br_startblock = DELAYSTARTBLOCK;
3720 else
3721 mval->br_startblock = got->br_startblock +
3722 (*bno - got->br_startoff);
3724 * Return the minimum of what we got and what we asked for for
3725 * the length. We can use the len variable here because it is
3726 * modified below and we could have been there before coming
3727 * here if the first part of the allocation didn't overlap what
3728 * was asked for.
3730 mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno,
3731 got->br_blockcount - (*bno - got->br_startoff));
3732 mval->br_state = got->br_state;
3733 ASSERT(mval->br_blockcount <= len);
3738 * Update and validate the extent map to return
3741 xfs_bmapi_update_map(
3742 struct xfs_bmbt_irec **map,
3750 xfs_bmbt_irec_t *mval = *map;
3752 ASSERT((flags & XFS_BMAPI_ENTIRE) ||
3753 ((mval->br_startoff + mval->br_blockcount) <= end));
3754 ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) ||
3755 (mval->br_startoff < obno));
3757 *bno = mval->br_startoff + mval->br_blockcount;
3758 *len = end - *bno;
3759 if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) {
3760 /* update previous map with new information */
3761 ASSERT(mval->br_startblock == mval[-1].br_startblock);
3762 ASSERT(mval->br_blockcount > mval[-1].br_blockcount);
3763 ASSERT(mval->br_state == mval[-1].br_state);
3764 mval[-1].br_blockcount = mval->br_blockcount;
3765 mval[-1].br_state = mval->br_state;
3766 } else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK &&
3767 mval[-1].br_startblock != DELAYSTARTBLOCK &&
3768 mval[-1].br_startblock != HOLESTARTBLOCK &&
3769 mval->br_startblock == mval[-1].br_startblock +
3770 mval[-1].br_blockcount &&
3771 mval[-1].br_state == mval->br_state) {
3772 ASSERT(mval->br_startoff ==
3773 mval[-1].br_startoff + mval[-1].br_blockcount);
3774 mval[-1].br_blockcount += mval->br_blockcount;
3775 } else if (*n > 0 &&
3776 mval->br_startblock == DELAYSTARTBLOCK &&
3777 mval[-1].br_startblock == DELAYSTARTBLOCK &&
3778 mval->br_startoff ==
3779 mval[-1].br_startoff + mval[-1].br_blockcount) {
3780 mval[-1].br_blockcount += mval->br_blockcount;
3781 mval[-1].br_state = mval->br_state;
3782 } else if (!((*n == 0) &&
3783 ((mval->br_startoff + mval->br_blockcount) <=
3784 obno))) {
3785 mval++;
3786 (*n)++;
3787 }
3788 *map = mval;
3789 }
3792 * Map file blocks to filesystem blocks without allocation.
3796 struct xfs_inode *ip,
3799 struct xfs_bmbt_irec *mval,
3803 struct xfs_mount *mp = ip->i_mount;
3804 struct xfs_ifork *ifp;
3805 struct xfs_bmbt_irec got;
3808 struct xfs_iext_cursor icur;
3812 int whichfork = xfs_bmapi_whichfork(flags);
3815 ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK|XFS_BMAPI_ENTIRE|
3816 XFS_BMAPI_COWFORK)));
3817 ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED|XFS_ILOCK_EXCL));
3819 if (unlikely(XFS_TEST_ERROR(
3820 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
3821 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
3822 mp, XFS_ERRTAG_BMAPIFORMAT))) {
3823 XFS_ERROR_REPORT("xfs_bmapi_read", XFS_ERRLEVEL_LOW, mp);
3824 return -EFSCORRUPTED;
3827 if (XFS_FORCED_SHUTDOWN(mp))
3828 return -EIO;
3830 XFS_STATS_INC(mp, xs_blk_mapr);
3832 ifp = XFS_IFORK_PTR(ip, whichfork);
3834 /* No CoW fork? Return a hole. */
3835 if (whichfork == XFS_COW_FORK) {
3836 mval->br_startoff = bno;
3837 mval->br_startblock = HOLESTARTBLOCK;
3838 mval->br_blockcount = len;
3839 mval->br_state = XFS_EXT_NORM;
3840 *nmap = 1;
3841 return 0;
3842 }
3845 * A missing attr ifork implies that the inode says we're in
3846 * extents or btree format but failed to pass the inode fork
3847 * verifier while trying to load it. Treat that as a file
3848 * corruption too.
3850 if (!ifp) {
3851 xfs_alert(mp, "%s: inode %llu missing fork %d",
3852 __func__, ip->i_ino, whichfork);
3854 return -EFSCORRUPTED;
3857 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
3858 error = xfs_iread_extents(NULL, ip, whichfork);
3859 if (error)
3860 return error;
3861 }
3863 if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got))
3864 eof = true;
3866 end = bno + len;
3867 obno = bno;
3868 while (bno < end && n < *nmap) {
3869 /* Reading past eof, act as though there's a hole up to end. */
3870 if (eof)
3871 got.br_startoff = end;
3872 if (got.br_startoff > bno) {
3873 /* Reading in a hole. */
3874 mval->br_startoff = bno;
3875 mval->br_startblock = HOLESTARTBLOCK;
3876 mval->br_blockcount =
3877 XFS_FILBLKS_MIN(len, got.br_startoff - bno);
3878 mval->br_state = XFS_EXT_NORM;
3879 bno += mval->br_blockcount;
3880 len -= mval->br_blockcount;
3881 mval++;
3882 n++;
3883 continue;
3884 }
3886 /* set up the extent map to return. */
3887 xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags);
3888 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
3890 /* If we're done, stop now. */
3891 if (bno >= end || n >= *nmap)
3892 break;
3894 /* Else go on to the next record. */
3895 if (!xfs_iext_next_extent(ifp, &icur, &got))
3896 eof = true;
3897 }
3898 *nmap = n;
3899 return 0;
3900 }
3903 * Add a delayed allocation extent to an inode. Blocks are reserved from the
3904 * global pool and the extent inserted into the inode in-core extent tree.
3906 * On entry, got refers to the first extent beyond the offset of the extent to
3907 * allocate or eof is specified if no such extent exists. On return, got refers
3908 * to the extent record that was inserted to the inode fork.
3910 * Note that the allocated extent may have been merged with contiguous extents
3911 * during insertion into the inode fork. Thus, got does not reflect the current
3912 * state of the inode fork on return. If necessary, the caller can use lastx to
3913 * look up the updated record in the inode fork.
3916 xfs_bmapi_reserve_delalloc(
3917 struct xfs_inode *ip,
3921 xfs_filblks_t prealloc,
3922 struct xfs_bmbt_irec *got,
3923 struct xfs_iext_cursor *icur,
3926 struct xfs_mount *mp = ip->i_mount;
3927 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
3929 xfs_extlen_t indlen;
3931 xfs_fileoff_t aoff = off;
3934 * Cap the alloc length. Keep track of prealloc so we know whether to
3935 * tag the inode before we return.
3937 alen = XFS_FILBLKS_MIN(len + prealloc, MAXEXTLEN);
3938 if (!eof)
3939 alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff);
3940 if (prealloc && alen >= len)
3941 prealloc = alen - len;
3943 /* Figure out the extent size, adjust alen */
3944 if (whichfork == XFS_COW_FORK) {
3945 struct xfs_bmbt_irec prev;
3946 xfs_extlen_t extsz = xfs_get_cowextsz_hint(ip);
3948 if (!xfs_iext_peek_prev_extent(ifp, icur, &prev))
3949 prev.br_startoff = NULLFILEOFF;
3951 error = xfs_bmap_extsize_align(mp, got, &prev, extsz, 0, eof,
3952 1, 0, &aoff, &alen);
3953 ASSERT(!error);
3954 }
3957 * Make a transaction-less quota reservation for delayed allocation
3958 * blocks. This number gets adjusted later. We return if we haven't
3959 * allocated blocks already inside this loop.
3961 error = xfs_trans_reserve_quota_nblks(NULL, ip, (long)alen, 0,
3962 XFS_QMOPT_RES_REGBLKS);
3963 if (error)
3964 return error;
3967 * Split changing sb for alen and indlen since they could be coming
3968 * from different places.
3970 indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen);
3971 ASSERT(indlen > 0);
3973 error = xfs_mod_fdblocks(mp, -((int64_t)alen), false);
3974 if (error)
3975 goto out_unreserve_quota;
3977 error = xfs_mod_fdblocks(mp, -((int64_t)indlen), false);
3978 if (error)
3979 goto out_unreserve_blocks;
3982 ip->i_delayed_blks += alen;
3983 xfs_mod_delalloc(ip->i_mount, alen + indlen);
3985 got->br_startoff = aoff;
3986 got->br_startblock = nullstartblock(indlen);
3987 got->br_blockcount = alen;
3988 got->br_state = XFS_EXT_NORM;
3990 xfs_bmap_add_extent_hole_delay(ip, whichfork, icur, got);
3993 * Tag the inode if blocks were preallocated. Note that COW fork
3994 * preallocation can occur at the start or end of the extent, even when
3995 * prealloc == 0, so we must also check the aligned offset and length.
3997 if (whichfork == XFS_DATA_FORK && prealloc)
3998 xfs_inode_set_eofblocks_tag(ip);
3999 if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len))
4000 xfs_inode_set_cowblocks_tag(ip);
4002 return 0;
4004 out_unreserve_blocks:
4005 xfs_mod_fdblocks(mp, alen, false);
4006 out_unreserve_quota:
4007 if (XFS_IS_QUOTA_ON(mp))
4008 xfs_trans_unreserve_quota_nblks(NULL, ip, (long)alen, 0,
4009 XFS_QMOPT_RES_REGBLKS);
4010 return error;
4011 }
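/*
 * Editor's note: the two-step reservation pattern above in miniature,
 * with hypothetical helpers. Data blocks (alen) and worst-case indirect
 * blocks (indlen) are taken from the free pool separately, so a failure
 * of the second step unwinds only the first.
 */
#include <stdint.h>

static int reserve_split(int64_t alen, int64_t indlen,
			 int (*take)(int64_t), void (*give_back)(int64_t))
{
	int error = take(alen);

	if (error)
		return error;
	error = take(indlen);
	if (error)
		give_back(alen);	/* roll back the first reservation */
	return error;
}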
4013 static int
4014 xfs_bmapi_allocate(
4015 struct xfs_bmalloca *bma)
4017 struct xfs_mount *mp = bma->ip->i_mount;
4018 int whichfork = xfs_bmapi_whichfork(bma->flags);
4019 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork);
4020 int tmp_logflags = 0;
4023 ASSERT(bma->length > 0);
4026 * For the wasdelay case, we could also just allocate the stuff asked
4027 * for in this bmap call but that wouldn't be as good.
4029 if (bma->wasdel) {
4030 bma->length = (xfs_extlen_t)bma->got.br_blockcount;
4031 bma->offset = bma->got.br_startoff;
4032 xfs_iext_peek_prev_extent(ifp, &bma->icur, &bma->prev);
4033 } else {
4034 bma->length = XFS_FILBLKS_MIN(bma->length, MAXEXTLEN);
4035 if (!bma->eof)
4036 bma->length = XFS_FILBLKS_MIN(bma->length,
4037 bma->got.br_startoff - bma->offset);
4038 }
4041 * Set the data type being allocated. For the data fork, the first data
4042 * in the file is treated differently to all other allocations. For the
4043 * attribute fork, we only need to ensure the allocated range is not on
4044 * the busy list.
4046 if (!(bma->flags & XFS_BMAPI_METADATA)) {
4047 bma->datatype = XFS_ALLOC_NOBUSY;
4048 if (whichfork == XFS_DATA_FORK) {
4049 if (bma->offset == 0)
4050 bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA;
4051 else
4052 bma->datatype |= XFS_ALLOC_USERDATA;
4053 }
4054 if (bma->flags & XFS_BMAPI_ZERO)
4055 bma->datatype |= XFS_ALLOC_USERDATA_ZERO;
4056 }
4058 bma->minlen = (bma->flags & XFS_BMAPI_CONTIG) ? bma->length : 1;
4061 * Only want to do the alignment at the eof if it is userdata and
4062 * allocation length is larger than a stripe unit.
4064 if (mp->m_dalign && bma->length >= mp->m_dalign &&
4065 !(bma->flags & XFS_BMAPI_METADATA) && whichfork == XFS_DATA_FORK) {
4066 error = xfs_bmap_isaeof(bma, whichfork);
4067 if (error)
4068 return error;
4069 }
4071 error = xfs_bmap_alloc(bma);
4072 if (error)
4073 return error;
4075 if (bma->blkno == NULLFSBLOCK)
4076 return 0;
4077 if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur)
4078 bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork);
4080 * Bump the number of extents we've allocated
4081 * in this call.
4083 bma->nallocs++;
4085 if (bma->cur)
4086 bma->cur->bc_private.b.flags =
4087 bma->wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
4089 bma->got.br_startoff = bma->offset;
4090 bma->got.br_startblock = bma->blkno;
4091 bma->got.br_blockcount = bma->length;
4092 bma->got.br_state = XFS_EXT_NORM;
4095 * In the data fork, a wasdelay extent has been initialized, so
4096 * shouldn't be flagged as unwritten.
4098 * For the cow fork, however, we convert delalloc reservations
4099 * (extents allocated for speculative preallocation) to
4100 * allocated unwritten extents, and only convert the unwritten
4101 * extents to real extents when we're about to write the data.
4103 if ((!bma->wasdel || (bma->flags & XFS_BMAPI_COWFORK)) &&
4104 (bma->flags & XFS_BMAPI_PREALLOC))
4105 bma->got.br_state = XFS_EXT_UNWRITTEN;
4107 if (bma->wasdel)
4108 error = xfs_bmap_add_extent_delay_real(bma, whichfork);
4109 else
4110 error = xfs_bmap_add_extent_hole_real(bma->tp, bma->ip,
4111 whichfork, &bma->icur, &bma->cur, &bma->got,
4112 &bma->logflags, bma->flags);
4114 bma->logflags |= tmp_logflags;
4115 if (error)
4116 return error;
4119 * Update our extent pointer, given that xfs_bmap_add_extent_delay_real
4120 * or xfs_bmap_add_extent_hole_real might have merged it into one of
4121 * the neighbouring ones.
4123 xfs_iext_get_extent(ifp, &bma->icur, &bma->got);
4125 ASSERT(bma->got.br_startoff <= bma->offset);
4126 ASSERT(bma->got.br_startoff + bma->got.br_blockcount >=
4127 bma->offset + bma->length);
4128 ASSERT(bma->got.br_state == XFS_EXT_NORM ||
4129 bma->got.br_state == XFS_EXT_UNWRITTEN);
4130 return 0;
4131 }
4134 xfs_bmapi_convert_unwritten(
4135 struct xfs_bmalloca *bma,
4136 struct xfs_bmbt_irec *mval,
4140 int whichfork = xfs_bmapi_whichfork(flags);
4141 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork);
4142 int tmp_logflags = 0;
4145 /* check if we need to do unwritten->real conversion */
4146 if (mval->br_state == XFS_EXT_UNWRITTEN &&
4147 (flags & XFS_BMAPI_PREALLOC))
4148 return 0;
4150 /* check if we need to do real->unwritten conversion */
4151 if (mval->br_state == XFS_EXT_NORM &&
4152 (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) !=
4153 (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT))
4154 return 0;
4157 * Modify (by adding) the state flag, if writing.
4159 ASSERT(mval->br_blockcount <= len);
4160 if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) {
4161 bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp,
4162 bma->ip, whichfork);
4164 mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN)
4165 ? XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
4168 * Before insertion into the bmbt, zero the range being converted
4169 * if required.
4171 if (flags & XFS_BMAPI_ZERO) {
4172 error = xfs_zero_extent(bma->ip, mval->br_startblock,
4173 mval->br_blockcount);
4174 if (error)
4175 return error;
4176 }
4178 error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, whichfork,
4179 &bma->icur, &bma->cur, mval, &tmp_logflags);
4181 * Log the inode core unconditionally in the unwritten extent conversion
4182 * path because the conversion might not have done so (e.g., if the
4183 * extent count hasn't changed). We need to make sure the inode is dirty
4184 * in the transaction for the sake of fsync(), even if nothing has
4185 * changed, because fsync() will not force the log for this transaction
4186 * unless it sees the inode pinned.
4188 * Note: If we're only converting cow fork extents, there aren't
4189 * any on-disk updates to make, so we don't need to log anything.
4191 if (whichfork != XFS_COW_FORK)
4192 bma->logflags |= tmp_logflags | XFS_ILOG_CORE;
4193 if (error)
4194 return error;
4197 * Update our extent pointer, given that
4198 * xfs_bmap_add_extent_unwritten_real might have merged it into one
4199 * of the neighbouring ones.
4201 xfs_iext_get_extent(ifp, &bma->icur, &bma->got);
4204 * We may have combined previously unwritten space with written space,
4205 * so generate another request.
4207 if (mval->br_blockcount < len)
4208 return -EAGAIN;
4209 return 0;
4210 }
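/*
 * Editor's note: a sketch of the caller-side contract for the -EAGAIN
 * above (convert() is a hypothetical stand-in): one call may convert
 * only the leading part of the request, so callers loop until the whole
 * range is written back.
 */
#include <errno.h>
#include <stdint.h>

static int convert_all(uint64_t bno, uint64_t len,
		       int (*convert)(uint64_t bno, uint64_t len,
				      uint64_t *done))
{
	uint64_t done;
	int error;

	do {
		error = convert(bno, len, &done);
		bno += done;
		len -= done;
	} while (error == -EAGAIN && len > 0);
	return error;
}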
4212 static inline xfs_extlen_t
4214 struct xfs_trans *tp,
4215 struct xfs_inode *ip,
4218 if (tp && tp->t_firstblock != NULLFSBLOCK)
4219 return 0;
4220 if (XFS_IFORK_FORMAT(ip, fork) != XFS_DINODE_FMT_BTREE)
4221 return 1;
4222 return be16_to_cpu(XFS_IFORK_PTR(ip, fork)->if_broot->bb_level) + 1;
4226 * Log whatever the flags say, even if error. Otherwise we might miss detecting
4227 * a case where the data is changed, there's an error, and it's not logged so we
4228 * don't shutdown when we should. Don't bother logging extents/btree changes if
4229 * we converted to the other format.
4233 struct xfs_bmalloca *bma,
4237 if ((bma->logflags & xfs_ilog_fext(whichfork)) &&
4238 XFS_IFORK_FORMAT(bma->ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
4239 bma->logflags &= ~xfs_ilog_fext(whichfork);
4240 else if ((bma->logflags & xfs_ilog_fbroot(whichfork)) &&
4241 XFS_IFORK_FORMAT(bma->ip, whichfork) != XFS_DINODE_FMT_BTREE)
4242 bma->logflags &= ~xfs_ilog_fbroot(whichfork);
4244 if (bma->logflags)
4245 xfs_trans_log_inode(bma->tp, bma->ip, bma->logflags);
4246 if (bma->cur)
4247 xfs_btree_del_cursor(bma->cur, error);
4251 * Map file blocks to filesystem blocks, and allocate blocks or convert the
4252 * extent state if necessary. Detailed behaviour is controlled by the flags
4253 * parameter. Only allocates blocks from a single allocation group, to avoid
4254 * locking problems.
4256 int
4257 xfs_bmapi_write(
4258 struct xfs_trans *tp, /* transaction pointer */
4259 struct xfs_inode *ip, /* incore inode */
4260 xfs_fileoff_t bno, /* starting file offs. mapped */
4261 xfs_filblks_t len, /* length to map in file */
4262 int flags, /* XFS_BMAPI_... */
4263 xfs_extlen_t total, /* total blocks needed */
4264 struct xfs_bmbt_irec *mval, /* output: map values */
4265 int *nmap) /* i/o: mval size/count */
4267 struct xfs_bmalloca bma = {
4272 struct xfs_mount *mp = ip->i_mount;
4273 struct xfs_ifork *ifp;
4274 xfs_fileoff_t end; /* end of mapped file region */
4275 bool eof = false; /* after the end of extents */
4276 int error; /* error return */
4277 int n; /* current extent index */
4278 xfs_fileoff_t obno; /* old block number (offset) */
4279 int whichfork; /* data or attr fork */
4282 xfs_fileoff_t orig_bno; /* original block number value */
4283 int orig_flags; /* original flags arg value */
4284 xfs_filblks_t orig_len; /* original value of len arg */
4285 struct xfs_bmbt_irec *orig_mval; /* original value of mval */
4286 int orig_nmap; /* original value of *nmap */
4294 whichfork = xfs_bmapi_whichfork(flags);
4297 ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
4300 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL);
4301 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
4302 ASSERT(!(flags & XFS_BMAPI_REMAP));
4304 /* zeroing is currently only for data extents, not metadata */
4305 ASSERT((flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)) !=
4306 (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO));
4308 * we can allocate unwritten extents or pre-zero allocated blocks,
4309 * but it makes no sense to do both at once. This would result in
4310 * zeroing the unwritten extent twice, but it still being an
4311 * unwritten extent....
4313 ASSERT((flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)) !=
4314 (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO));
4316 if (unlikely(XFS_TEST_ERROR(
4317 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
4318 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
4319 mp, XFS_ERRTAG_BMAPIFORMAT))) {
4320 XFS_ERROR_REPORT("xfs_bmapi_write", XFS_ERRLEVEL_LOW, mp);
4321 return -EFSCORRUPTED;
4324 if (XFS_FORCED_SHUTDOWN(mp))
4325 return -EIO;
4327 ifp = XFS_IFORK_PTR(ip, whichfork);
4329 XFS_STATS_INC(mp, xs_blk_mapw);
4331 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
4332 error = xfs_iread_extents(tp, ip, whichfork);
4333 if (error)
4334 goto error0;
4335 }
4337 if (!xfs_iext_lookup_extent(ip, ifp, bno, &bma.icur, &bma.got))
4338 eof = true;
4339 if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
4340 bma.prev.br_startoff = NULLFILEOFF;
4341 bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);
4343 n = 0;
4344 end = bno + len;
4345 obno = bno;
4346 while (bno < end && n < *nmap) {
4347 bool need_alloc = false, wasdelay = false;
4349 /* in hole or beyond EOF? */
4350 if (eof || bma.got.br_startoff > bno) {
4352 * CoW fork conversions should /never/ hit EOF or
4353 * holes. There should always be something for us
4354 * to work on.
4356 ASSERT(!((flags & XFS_BMAPI_CONVERT) &&
4357 (flags & XFS_BMAPI_COWFORK)));
4359 need_alloc = true;
4360 } else if (isnullstartblock(bma.got.br_startblock)) {
4361 wasdelay = true;
4362 }
4365 * First, deal with the hole before the allocated space
4366 * that we found, if any.
4368 if (need_alloc || wasdelay) {
4369 bma.eof = eof;
4370 bma.conv = !!(flags & XFS_BMAPI_CONVERT);
4371 bma.wasdel = wasdelay;
4372 bma.offset = bno;
4373 bma.flags = flags;
4376 * There's a 32/64 bit type mismatch between the
4377 * allocation length request (which can be 64 bits in
4378 * length) and the bma length request, which is
4379 * xfs_extlen_t and therefore 32 bits. Hence we have to
4380 * check for 32-bit overflows and handle them here.
4382 if (len > (xfs_filblks_t)MAXEXTLEN)
4383 bma.length = MAXEXTLEN;
4384 else
4385 bma.length = len;
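/*
 * Editor's note: worked example of the clamp above. MAXEXTLEN is
 * (1 << 21) - 1 = 2097151 blocks, so a 64-bit request for
 * len = 3000000 blocks is trimmed to bma.length = 2097151 on this pass
 * and the remainder is mapped on the next iteration of the loop.
 */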
4388 ASSERT(bma.length > 0);
4389 error = xfs_bmapi_allocate(&bma);
4390 if (error)
4391 goto error0;
4392 if (bma.blkno == NULLFSBLOCK)
4393 break;
4396 * If this is a CoW allocation, record the data in
4397 * the refcount btree for orphan recovery.
4399 if (whichfork == XFS_COW_FORK)
4400 xfs_refcount_alloc_cow_extent(tp, bma.blkno,
4401 bma.length);
4402 }
4404 /* Deal with the allocated space we found. */
4405 xfs_bmapi_trim_map(mval, &bma.got, &bno, len, obno,
4408 /* Execute unwritten extent conversion if necessary */
4409 error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags);
4410 if (error == -EAGAIN)
4415 /* update the extent map to return */
4416 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
4419 * If we're done, stop now. Stop when we've allocated
4420 * XFS_BMAP_MAX_NMAP extents no matter what. Otherwise
4421 * the transaction may get too big.
4423 if (bno >= end || n >= *nmap || bma.nallocs >= *nmap)
4426 /* Else go on to the next record. */
4428 if (!xfs_iext_next_extent(ifp, &bma.icur, &bma.got))
4433 error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags,
4438 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE ||
4439 XFS_IFORK_NEXTENTS(ip, whichfork) >
4440 XFS_IFORK_MAXEXT(ip, whichfork));
4441 xfs_bmapi_finish(&bma, whichfork, 0);
4442 xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
4446 xfs_bmapi_finish(&bma, whichfork, error);
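/*
 * Illustrative sketch (not a quote of any caller): a data fork caller
 * typically sizes an array of mappings and loops until the range is
 * covered. The offset_fsb/count_fsb names below are assumptions for the
 * example only:
 *
 *	struct xfs_bmbt_irec	map[XFS_BMAP_MAX_NMAP];
 *	int			nmap = XFS_BMAP_MAX_NMAP;
 *
 *	error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
 *			XFS_BMAPI_PREALLOC, 0, map, &nmap);
 *
 * On return nmap holds the number of mappings produced, which may cover
 * less than the range requested.
 */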
4451 * Convert an existing delalloc extent to real blocks based on file offset. This
4452 * attempts to allocate the entire delalloc extent and may require multiple
4453 * invocations to allocate the target offset if a large enough physical extent is not available.
4457 xfs_bmapi_convert_delalloc(
4458 struct xfs_inode *ip,
4461 struct iomap *iomap,
4464 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
4465 struct xfs_mount *mp = ip->i_mount;
4466 xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
4467 struct xfs_bmalloca bma = { NULL };
4469 struct xfs_trans *tp;
4472 if (whichfork == XFS_COW_FORK)
4473 flags |= IOMAP_F_SHARED;
4476 * Space for the extent and indirect blocks was reserved when the
4477 * delalloc extent was created so there's no need to do so here.
4479 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0,
4480 XFS_TRANS_RESERVE, &tp);
4484 xfs_ilock(ip, XFS_ILOCK_EXCL);
4485 xfs_trans_ijoin(tp, ip, 0);
4487 if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &bma.icur, &bma.got) ||
4488 bma.got.br_startoff > offset_fsb) {
4490 * No extent found in the range we are trying to convert. This
4491 * should only happen for the COW fork, where another thread
4492 * might have moved the extent to the data fork in the meantime.
4494 WARN_ON_ONCE(whichfork != XFS_COW_FORK);
4496 goto out_trans_cancel;
4500 * If we find a real extent here we raced with another thread converting
4501 * the extent. Just return the real extent at this offset.
4503 if (!isnullstartblock(bma.got.br_startblock)) {
4504 xfs_bmbt_to_iomap(ip, iomap, &bma.got, flags);
4505 *seq = READ_ONCE(ifp->if_seq);
4506 goto out_trans_cancel;
4512 bma.offset = bma.got.br_startoff;
4513 bma.length = max_t(xfs_filblks_t, bma.got.br_blockcount, MAXEXTLEN);
4514 bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);
4515 if (whichfork == XFS_COW_FORK)
4516 bma.flags = XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC;
4518 if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
4519 bma.prev.br_startoff = NULLFILEOFF;
4521 error = xfs_bmapi_allocate(&bma);
4525 error = -ENOSPC;
4526 if (WARN_ON_ONCE(bma.blkno == NULLFSBLOCK))
4527 goto out_finish;
4528 error = -EFSCORRUPTED;
4529 if (WARN_ON_ONCE(!xfs_valid_startblock(ip, bma.got.br_startblock)))
4530 goto out_finish;
4532 XFS_STATS_ADD(mp, xs_xstrat_bytes, XFS_FSB_TO_B(mp, bma.length));
4533 XFS_STATS_INC(mp, xs_xstrat_quick);
4535 ASSERT(!isnullstartblock(bma.got.br_startblock));
4536 xfs_bmbt_to_iomap(ip, iomap, &bma.got, flags);
4537 *seq = READ_ONCE(ifp->if_seq);
4539 if (whichfork == XFS_COW_FORK)
4540 xfs_refcount_alloc_cow_extent(tp, bma.blkno, bma.length);
4542 error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags,
4547 xfs_bmapi_finish(&bma, whichfork, 0);
4548 error = xfs_trans_commit(tp);
4549 xfs_iunlock(ip, XFS_ILOCK_EXCL);
4553 xfs_bmapi_finish(&bma, whichfork, error);
4555 xfs_trans_cancel(tp);
4556 xfs_iunlock(ip, XFS_ILOCK_EXCL);
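/*
 * Usage note: each call converts from the start of the delalloc extent, so
 * a single invocation may return a mapping that still ends before the
 * offset being written back. A hedged sketch of the retry loop a caller is
 * expected to run (pos is the byte offset being written back):
 *
 *	do {
 *		error = xfs_bmapi_convert_delalloc(ip, whichfork, pos,
 *				&iomap, &seq);
 *		if (error)
 *			return error;
 *	} while (iomap.offset + iomap.length <= pos);
 */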
4562 struct xfs_trans *tp,
4563 struct xfs_inode *ip,
4566 xfs_fsblock_t startblock,
4569 struct xfs_mount *mp = ip->i_mount;
4570 struct xfs_ifork *ifp;
4571 struct xfs_btree_cur *cur = NULL;
4572 struct xfs_bmbt_irec got;
4573 struct xfs_iext_cursor icur;
4574 int whichfork = xfs_bmapi_whichfork(flags);
4575 int logflags = 0, error;
4577 ifp = XFS_IFORK_PTR(ip, whichfork);
4579 ASSERT(len <= (xfs_filblks_t)MAXEXTLEN);
4580 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
4581 ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC |
4582 XFS_BMAPI_NORMAP)));
4583 ASSERT((flags & (XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC)) !=
4584 (XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC));
4586 if (unlikely(XFS_TEST_ERROR(
4587 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
4588 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
4589 mp, XFS_ERRTAG_BMAPIFORMAT))) {
4590 XFS_ERROR_REPORT("xfs_bmapi_remap", XFS_ERRLEVEL_LOW, mp);
4591 return -EFSCORRUPTED;
4594 if (XFS_FORCED_SHUTDOWN(mp))
4597 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
4598 error = xfs_iread_extents(tp, ip, whichfork);
4603 if (xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
4604 /* make sure we only reflink into a hole. */
4605 ASSERT(got.br_startoff > bno);
4606 ASSERT(got.br_startoff - bno >= len);
4609 ip->i_d.di_nblocks += len;
4610 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
4612 if (ifp->if_flags & XFS_IFBROOT) {
4613 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
4614 cur->bc_private.b.flags = 0;
4617 got.br_startoff = bno;
4618 got.br_startblock = startblock;
4619 got.br_blockcount = len;
4620 if (flags & XFS_BMAPI_PREALLOC)
4621 got.br_state = XFS_EXT_UNWRITTEN;
4623 got.br_state = XFS_EXT_NORM;
4625 error = xfs_bmap_add_extent_hole_real(tp, ip, whichfork, &icur,
4626 &cur, &got, &logflags, flags);
4630 error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags, whichfork);
4633 if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS)
4634 logflags &= ~XFS_ILOG_DEXT;
4635 else if (ip->i_d.di_format != XFS_DINODE_FMT_BTREE)
4636 logflags &= ~XFS_ILOG_DBROOT;
4639 xfs_trans_log_inode(tp, ip, logflags);
4641 xfs_btree_del_cursor(cur, error);
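/*
 * For reference: xfs_bmapi_remap() installs an already-allocated physical
 * extent at a new file offset without allocating anything, which is why the
 * target range must be a hole. This is roughly how a deferred XFS_BMAP_MAP
 * intent is replayed from xfs_bmap_finish_one() below:
 *
 *	error = xfs_bmapi_remap(tp, ip, startoff, blockcount, startblock, 0);
 */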
4646 * When a delalloc extent is split (e.g., due to a hole punch), the original
4647 * indlen reservation must be shared across the two new extents that are left behind.
4650 * Given the original reservation and the worst case indlen for the two new
4651 * extents (as calculated by xfs_bmap_worst_indlen()), split the original
4652 * reservation fairly across the two new extents. If necessary, steal available
4653 * blocks from a deleted extent to make up a reservation deficiency (e.g., if
4654 * ores == 1). The number of stolen blocks is returned. The availability and
4655 * subsequent accounting of stolen blocks is the responsibility of the caller.
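 *
 * Worked example with made-up numbers: ores = 5, *indlen1 = *indlen2 = 4
 * (so nres = 8) and avail = 1. One block is stolen (ores becomes 6),
 * resfactor = (6 * 100) / 8 = 75, and each requested indlen of 4 scales
 * down to 3, consuming all 6 reserved blocks with no remainder; the
 * function returns 1, the count of stolen blocks.
 */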
4657 static xfs_filblks_t
4658 xfs_bmap_split_indlen(
4659 xfs_filblks_t ores, /* original res. */
4660 xfs_filblks_t *indlen1, /* ext1 worst indlen */
4661 xfs_filblks_t *indlen2, /* ext2 worst indlen */
4662 xfs_filblks_t avail) /* stealable blocks */
4664 xfs_filblks_t len1 = *indlen1;
4665 xfs_filblks_t len2 = *indlen2;
4666 xfs_filblks_t nres = len1 + len2; /* new total res. */
4667 xfs_filblks_t stolen = 0;
4668 xfs_filblks_t resfactor;
4671 * Steal as many blocks as we can to try and satisfy the worst case
4672 * indlen for both new extents.
4674 if (ores < nres && avail)
4675 stolen = XFS_FILBLKS_MIN(nres - ores, avail);
4678 /* nothing else to do if we've satisfied the new reservation */
4683 * We can't meet the total required reservation for the two extents.
4684 * Calculate the percent of the overall shortage between both extents
4685 * and apply this percentage to each of the requested indlen values.
4686 * This distributes the shortage fairly and reduces the chances that one
4687 * of the two extents is left with nothing when extents are repeatedly split.
4690 resfactor = (ores * 100);
4691 do_div(resfactor, nres);
4696 ASSERT(len1 + len2 <= ores);
4697 ASSERT(len1 < *indlen1 && len2 < *indlen2);
4700 * Hand out the remainder to each extent. If one of the two reservations
4701 * is zero, we want to make sure that one gets a block first. The loop
4702 * below starts with len1, so hand len2 a block right off the bat if it is zero.
4705 ores -= (len1 + len2);
4706 ASSERT((*indlen1 - len1) + (*indlen2 - len2) >= ores);
4707 if (ores && !len2 && *indlen2) {
4712 if (len1 < *indlen1) {
4718 if (len2 < *indlen2) {
4731 xfs_bmap_del_extent_delay(
4732 struct xfs_inode *ip,
4734 struct xfs_iext_cursor *icur,
4735 struct xfs_bmbt_irec *got,
4736 struct xfs_bmbt_irec *del)
4738 struct xfs_mount *mp = ip->i_mount;
4739 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
4740 struct xfs_bmbt_irec new;
4741 int64_t da_old, da_new, da_diff = 0;
4742 xfs_fileoff_t del_endoff, got_endoff;
4743 xfs_filblks_t got_indlen, new_indlen, stolen;
4744 int state = xfs_bmap_fork_to_state(whichfork);
4748 XFS_STATS_INC(mp, xs_del_exlist);
4750 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
4751 del_endoff = del->br_startoff + del->br_blockcount;
4752 got_endoff = got->br_startoff + got->br_blockcount;
4753 da_old = startblockval(got->br_startblock);
4756 ASSERT(del->br_blockcount > 0);
4757 ASSERT(got->br_startoff <= del->br_startoff);
4758 ASSERT(got_endoff >= del_endoff);
4761 uint64_t rtexts = del->br_blockcount;
4763 do_div(rtexts, mp->m_sb.sb_rextsize); /* blocks to rt extents */
4764 xfs_mod_frextents(mp, rtexts);
4768 * Update the inode delalloc counter now and wait to update the
4769 * sb counters as we might have to borrow some blocks for the
4770 * indirect block accounting.
4772 error = xfs_trans_reserve_quota_nblks(NULL, ip,
4773 -((long)del->br_blockcount), 0,
4774 isrt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
4777 ip->i_delayed_blks -= del->br_blockcount;
4779 if (got->br_startoff == del->br_startoff)
4780 state |= BMAP_LEFT_FILLING;
4781 if (got_endoff == del_endoff)
4782 state |= BMAP_RIGHT_FILLING;
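/*
 * Classify the deletion: if del covers the start of got, the left edge is
 * "filled"; likewise for the right edge. Both set means del matches got
 * exactly, neither set means del punches a hole in the middle and got has
 * to be split in two.
 */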
4784 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
4785 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
4787 * Matches the whole extent. Delete the entry.
4789 xfs_iext_remove(ip, icur, state);
4790 xfs_iext_prev(ifp, icur);
4792 case BMAP_LEFT_FILLING:
4794 * Deleting the first part of the extent.
4796 got->br_startoff = del_endoff;
4797 got->br_blockcount -= del->br_blockcount;
4798 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
4799 got->br_blockcount), da_old);
4800 got->br_startblock = nullstartblock((int)da_new);
4801 xfs_iext_update_extent(ip, state, icur, got);
4803 case BMAP_RIGHT_FILLING:
4805 * Deleting the last part of the extent.
4807 got->br_blockcount = got->br_blockcount - del->br_blockcount;
4808 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
4809 got->br_blockcount), da_old);
4810 got->br_startblock = nullstartblock((int)da_new);
4811 xfs_iext_update_extent(ip, state, icur, got);
4815 * Deleting the middle of the extent.
4817 * Distribute the original indlen reservation across the two new
4818 * extents. Steal blocks from the deleted extent if necessary.
4819 * Stealing blocks simply fudges the fdblocks accounting below.
4820 * Warn if either of the new indlen reservations is zero as this
4821 * can lead to delalloc problems.
4823 got->br_blockcount = del->br_startoff - got->br_startoff;
4824 got_indlen = xfs_bmap_worst_indlen(ip, got->br_blockcount);
4826 new.br_blockcount = got_endoff - del_endoff;
4827 new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount);
4829 WARN_ON_ONCE(!got_indlen || !new_indlen);
4830 stolen = xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen,
4831 del->br_blockcount);
4833 got->br_startblock = nullstartblock((int)got_indlen);
4835 new.br_startoff = del_endoff;
4836 new.br_state = got->br_state;
4837 new.br_startblock = nullstartblock((int)new_indlen);
4839 xfs_iext_update_extent(ip, state, icur, got);
4840 xfs_iext_next(ifp, icur);
4841 xfs_iext_insert(ip, icur, &new, state);
4843 da_new = got_indlen + new_indlen - stolen;
4844 del->br_blockcount -= stolen;
4848 ASSERT(da_old >= da_new);
4849 da_diff = da_old - da_new;
4850 if (!isrt)
4851 da_diff += del->br_blockcount;
4853 xfs_mod_fdblocks(mp, da_diff, false);
4854 xfs_mod_delalloc(mp, -da_diff);
4860 xfs_bmap_del_extent_cow(
4861 struct xfs_inode *ip,
4862 struct xfs_iext_cursor *icur,
4863 struct xfs_bmbt_irec *got,
4864 struct xfs_bmbt_irec *del)
4866 struct xfs_mount *mp = ip->i_mount;
4867 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
4868 struct xfs_bmbt_irec new;
4869 xfs_fileoff_t del_endoff, got_endoff;
4870 int state = BMAP_COWFORK;
4872 XFS_STATS_INC(mp, xs_del_exlist);
4874 del_endoff = del->br_startoff + del->br_blockcount;
4875 got_endoff = got->br_startoff + got->br_blockcount;
4877 ASSERT(del->br_blockcount > 0);
4878 ASSERT(got->br_startoff <= del->br_startoff);
4879 ASSERT(got_endoff >= del_endoff);
4880 ASSERT(!isnullstartblock(got->br_startblock));
4882 if (got->br_startoff == del->br_startoff)
4883 state |= BMAP_LEFT_FILLING;
4884 if (got_endoff == del_endoff)
4885 state |= BMAP_RIGHT_FILLING;
4887 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
4888 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
4890 * Matches the whole extent. Delete the entry.
4892 xfs_iext_remove(ip, icur, state);
4893 xfs_iext_prev(ifp, icur);
4895 case BMAP_LEFT_FILLING:
4897 * Deleting the first part of the extent.
4899 got->br_startoff = del_endoff;
4900 got->br_blockcount -= del->br_blockcount;
4901 got->br_startblock = del->br_startblock + del->br_blockcount;
4902 xfs_iext_update_extent(ip, state, icur, got);
4904 case BMAP_RIGHT_FILLING:
4906 * Deleting the last part of the extent.
4908 got->br_blockcount -= del->br_blockcount;
4909 xfs_iext_update_extent(ip, state, icur, got);
4913 * Deleting the middle of the extent.
4915 got->br_blockcount = del->br_startoff - got->br_startoff;
4917 new.br_startoff = del_endoff;
4918 new.br_blockcount = got_endoff - del_endoff;
4919 new.br_state = got->br_state;
4920 new.br_startblock = del->br_startblock + del->br_blockcount;
4922 xfs_iext_update_extent(ip, state, icur, got);
4923 xfs_iext_next(ifp, icur);
4924 xfs_iext_insert(ip, icur, &new, state);
4927 ip->i_delayed_blks -= del->br_blockcount;
4931 * Called by __xfs_bunmapi() to update file extent records and the btree
4932 * after removing space.
4934 STATIC int /* error */
4935 xfs_bmap_del_extent_real(
4936 xfs_inode_t *ip, /* incore inode pointer */
4937 xfs_trans_t *tp, /* current transaction pointer */
4938 struct xfs_iext_cursor *icur,
4939 xfs_btree_cur_t *cur, /* if null, not a btree */
4940 xfs_bmbt_irec_t *del, /* data to remove from extents */
4941 int *logflagsp, /* inode logging flags */
4942 int whichfork, /* data or attr fork */
4943 int bflags) /* bmapi flags */
4945 xfs_fsblock_t del_endblock=0; /* first block past del */
4946 xfs_fileoff_t del_endoff; /* first offset past del */
4947 int do_fx; /* free extent at end of routine */
4948 int error; /* error return value */
4949 int flags = 0;/* inode logging flags */
4950 struct xfs_bmbt_irec got; /* current extent entry */
4951 xfs_fileoff_t got_endoff; /* first offset past got */
4952 int i; /* temp state */
4953 struct xfs_ifork *ifp; /* inode fork pointer */
4954 xfs_mount_t *mp; /* mount structure */
4955 xfs_filblks_t nblks; /* quota/sb block count */
4956 xfs_bmbt_irec_t new; /* new record to be inserted */
4958 uint qfield; /* quota field to update */
4959 int state = xfs_bmap_fork_to_state(whichfork);
4960 struct xfs_bmbt_irec old;
4963 XFS_STATS_INC(mp, xs_del_exlist);
4965 ifp = XFS_IFORK_PTR(ip, whichfork);
4966 ASSERT(del->br_blockcount > 0);
4967 xfs_iext_get_extent(ifp, icur, &got);
4968 ASSERT(got.br_startoff <= del->br_startoff);
4969 del_endoff = del->br_startoff + del->br_blockcount;
4970 got_endoff = got.br_startoff + got.br_blockcount;
4971 ASSERT(got_endoff >= del_endoff);
4972 ASSERT(!isnullstartblock(got.br_startblock));
4977 * If it's the case where the directory code is running with no block
4978 * reservation, and the deleted block is in the middle of its extent,
4979 * and the resulting insert of an extent would cause transformation to
4980 * btree format, then reject it. The calling code will then swap blocks
4981 * around instead. We have to do this now, rather than waiting for the
4982 * conversion to btree format, since the transaction will be dirty then.
4984 if (tp->t_blk_res == 0 &&
4985 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
4986 XFS_IFORK_NEXTENTS(ip, whichfork) >=
4987 XFS_IFORK_MAXEXT(ip, whichfork) &&
4988 del->br_startoff > got.br_startoff && del_endoff < got_endoff)
4991 flags = XFS_ILOG_CORE;
4992 if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) {
4997 bno = div_u64_rem(del->br_startblock, mp->m_sb.sb_rextsize,
5000 len = div_u64_rem(del->br_blockcount, mp->m_sb.sb_rextsize,
5004 error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len);
5008 nblks = len * mp->m_sb.sb_rextsize;
5009 qfield = XFS_TRANS_DQ_RTBCOUNT;
5012 nblks = del->br_blockcount;
5013 qfield = XFS_TRANS_DQ_BCOUNT;
5016 del_endblock = del->br_startblock + del->br_blockcount;
5018 error = xfs_bmbt_lookup_eq(cur, &got, &i);
5021 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
5024 if (got.br_startoff == del->br_startoff)
5025 state |= BMAP_LEFT_FILLING;
5026 if (got_endoff == del_endoff)
5027 state |= BMAP_RIGHT_FILLING;
5029 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
5030 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
5032 * Matches the whole extent. Delete the entry.
5034 xfs_iext_remove(ip, icur, state);
5035 xfs_iext_prev(ifp, icur);
5036 XFS_IFORK_NEXT_SET(ip, whichfork,
5037 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
5038 flags |= XFS_ILOG_CORE;
5040 flags |= xfs_ilog_fext(whichfork);
5043 if ((error = xfs_btree_delete(cur, &i)))
5045 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
5047 case BMAP_LEFT_FILLING:
5049 * Deleting the first part of the extent.
5051 got.br_startoff = del_endoff;
5052 got.br_startblock = del_endblock;
5053 got.br_blockcount -= del->br_blockcount;
5054 xfs_iext_update_extent(ip, state, icur, &got);
5056 flags |= xfs_ilog_fext(whichfork);
5059 error = xfs_bmbt_update(cur, &got);
5063 case BMAP_RIGHT_FILLING:
5065 * Deleting the last part of the extent.
5067 got.br_blockcount -= del->br_blockcount;
5068 xfs_iext_update_extent(ip, state, icur, &got);
5070 flags |= xfs_ilog_fext(whichfork);
5073 error = xfs_bmbt_update(cur, &got);
5079 * Deleting the middle of the extent.
5083 got.br_blockcount = del->br_startoff - got.br_startoff;
5084 xfs_iext_update_extent(ip, state, icur, &got);
5086 new.br_startoff = del_endoff;
5087 new.br_blockcount = got_endoff - del_endoff;
5088 new.br_state = got.br_state;
5089 new.br_startblock = del_endblock;
5091 flags |= XFS_ILOG_CORE;
5093 error = xfs_bmbt_update(cur, &got);
5096 error = xfs_btree_increment(cur, 0, &i);
5099 cur->bc_rec.b = new;
5100 error = xfs_btree_insert(cur, &i);
5101 if (error && error != -ENOSPC)
5104 * If we get no-space back from the btree insert, it tried a
5105 * split, and we have a zero block reservation. Fix up
5106 * our state and return the error.
5108 if (error == -ENOSPC) {
5110 * Reset the cursor, don't trust it after any insert operation.
5113 error = xfs_bmbt_lookup_eq(cur, &got, &i);
5116 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
5118 * Update the btree record back
5119 * to the original value.
5121 error = xfs_bmbt_update(cur, &old);
5125 * Reset the extent record back
5126 * to the original value.
5128 xfs_iext_update_extent(ip, state, icur, &old);
5133 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
5135 flags |= xfs_ilog_fext(whichfork);
5136 XFS_IFORK_NEXT_SET(ip, whichfork,
5137 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
5138 xfs_iext_next(ifp, icur);
5139 xfs_iext_insert(ip, icur, &new, state);
5143 /* remove reverse mapping */
5144 xfs_rmap_unmap_extent(tp, ip, whichfork, del);
5147 * If we need to, add to list of extents to delete.
5149 if (do_fx && !(bflags & XFS_BMAPI_REMAP)) {
5150 if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) {
5151 xfs_refcount_decrease_extent(tp, del);
5153 __xfs_bmap_add_free(tp, del->br_startblock,
5154 del->br_blockcount, NULL,
5155 (bflags & XFS_BMAPI_NODISCARD) ||
5156 del->br_state == XFS_EXT_UNWRITTEN);
5161 * Adjust inode # blocks in the file.
5164 ip->i_d.di_nblocks -= nblks;
5166 * Adjust quota data.
5168 if (qfield && !(bflags & XFS_BMAPI_REMAP))
5169 xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);
5177 * Unmap (remove) blocks from a file.
5178 * If nexts is nonzero then the number of extents to remove is limited to
5179 * that value. If not all extents in the block range can be removed then
5180 * *rlen is set to the length of the range that remains.
5183 __xfs_bunmapi(
5184 struct xfs_trans *tp, /* transaction pointer */
5185 struct xfs_inode *ip, /* incore inode */
5186 xfs_fileoff_t start, /* first file offset deleted */
5187 xfs_filblks_t *rlen, /* i/o: amount remaining */
5188 int flags, /* misc flags */
5189 xfs_extnum_t nexts) /* number of extents max */
5191 struct xfs_btree_cur *cur; /* bmap btree cursor */
5192 struct xfs_bmbt_irec del; /* extent being deleted */
5193 int error; /* error return value */
5194 xfs_extnum_t extno; /* extent number in list */
5195 struct xfs_bmbt_irec got; /* current extent record */
5196 struct xfs_ifork *ifp; /* inode fork pointer */
5197 int isrt; /* freeing in rt area */
5198 int logflags; /* transaction logging flags */
5199 xfs_extlen_t mod; /* rt extent offset */
5200 struct xfs_mount *mp; /* mount structure */
5201 int tmp_logflags; /* partial logging flags */
5202 int wasdel; /* was a delayed alloc extent */
5203 int whichfork; /* data or attribute fork */
5205 xfs_filblks_t len = *rlen; /* length to unmap in file */
5206 xfs_fileoff_t max_len;
5207 xfs_agnumber_t prev_agno = NULLAGNUMBER, agno;
5209 struct xfs_iext_cursor icur;
5212 trace_xfs_bunmap(ip, start, len, flags, _RET_IP_);
5214 whichfork = xfs_bmapi_whichfork(flags);
5215 ASSERT(whichfork != XFS_COW_FORK);
5216 ifp = XFS_IFORK_PTR(ip, whichfork);
5218 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
5219 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
5220 XFS_ERROR_REPORT("xfs_bunmapi", XFS_ERRLEVEL_LOW,
5222 return -EFSCORRUPTED;
5225 if (XFS_FORCED_SHUTDOWN(mp))
5228 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
5233 * Guesstimate how many blocks we can unmap without running the risk of
5234 * blowing out the transaction with a mix of EFIs and reflink adjustments.
5237 if (tp && xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK)
5238 max_len = min(len, xfs_refcount_max_unmap(tp->t_log_res));
5242 if (!(ifp->if_flags & XFS_IFEXTENTS) &&
5243 (error = xfs_iread_extents(tp, ip, whichfork)))
5245 if (xfs_iext_count(ifp) == 0) {
5249 XFS_STATS_INC(mp, xs_blk_unmap);
5250 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
5253 if (!xfs_iext_lookup_extent_before(ip, ifp, &end, &icur, &got)) {
5260 if (ifp->if_flags & XFS_IFBROOT) {
5261 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
5262 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5263 cur->bc_private.b.flags = 0;
5269 * Synchronize by locking the bitmap inode.
5271 xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
5272 xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL);
5273 xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
5274 xfs_trans_ijoin(tp, mp->m_rsumip, XFS_ILOCK_EXCL);
5278 while (end != (xfs_fileoff_t)-1 && end >= start &&
5279 (nexts == 0 || extno < nexts) && max_len > 0) {
5281 * Does end fall in a hole just before the found extent?
5282 * If so, just back up to the previous extent.
5284 if (got.br_startoff > end &&
5285 !xfs_iext_prev_extent(ifp, &icur, &got)) {
5290 * Is the last block of this extent before the range
5291 * we're supposed to delete? If so, we're done.
5293 end = XFS_FILEOFF_MIN(end,
5294 got.br_startoff + got.br_blockcount - 1);
5298 * Then deal with the (possibly delayed) allocated space we found.
5302 wasdel = isnullstartblock(del.br_startblock);
5305 * Make sure we don't touch multiple AGF headers out of order
5306 * in a single transaction, as that could cause AB-BA deadlocks.
5309 agno = XFS_FSB_TO_AGNO(mp, del.br_startblock);
5310 if (prev_agno != NULLAGNUMBER && prev_agno > agno)
5314 if (got.br_startoff < start) {
5315 del.br_startoff = start;
5316 del.br_blockcount -= start - got.br_startoff;
5318 del.br_startblock += start - got.br_startoff;
5320 if (del.br_startoff + del.br_blockcount > end + 1)
5321 del.br_blockcount = end + 1 - del.br_startoff;
5323 /* How much can we safely unmap? */
5324 if (max_len < del.br_blockcount) {
5325 del.br_startoff += del.br_blockcount - max_len;
5327 del.br_startblock += del.br_blockcount - max_len;
5328 del.br_blockcount = max_len;
5334 sum = del.br_startblock + del.br_blockcount;
5335 div_u64_rem(sum, mp->m_sb.sb_rextsize, &mod);
5338 * Realtime extent not lined up at the end.
5339 * The extent could have been split into written
5340 * and unwritten pieces, or we could just be
5341 * unmapping part of it. But we can't really
5342 * get rid of part of a realtime extent.
5344 if (del.br_state == XFS_EXT_UNWRITTEN) {
5346 * This piece is unwritten, or we're not
5347 * using unwritten extents. Skip over it.
5350 end -= mod > del.br_blockcount ?
5351 del.br_blockcount : mod;
5352 if (end < got.br_startoff &&
5353 !xfs_iext_prev_extent(ifp, &icur, &got)) {
5360 * It's written, turn it unwritten.
5361 * This is better than zeroing it.
5363 ASSERT(del.br_state == XFS_EXT_NORM);
5364 ASSERT(tp->t_blk_res > 0);
5366 * If this spans a realtime extent boundary,
5367 * chop it back to the start of the one we end at.
5369 if (del.br_blockcount > mod) {
5370 del.br_startoff += del.br_blockcount - mod;
5371 del.br_startblock += del.br_blockcount - mod;
5372 del.br_blockcount = mod;
5374 del.br_state = XFS_EXT_UNWRITTEN;
5375 error = xfs_bmap_add_extent_unwritten_real(tp, ip,
5376 whichfork, &icur, &cur, &del,
5382 div_u64_rem(del.br_startblock, mp->m_sb.sb_rextsize, &mod);
5385 * Realtime extent is lined up at the end but not
5386 * at the front. We'll get rid of full extents if we can.
5389 mod = mp->m_sb.sb_rextsize - mod;
5390 if (del.br_blockcount > mod) {
5391 del.br_blockcount -= mod;
5392 del.br_startoff += mod;
5393 del.br_startblock += mod;
5394 } else if (del.br_startoff == start &&
5395 (del.br_state == XFS_EXT_UNWRITTEN ||
5396 tp->t_blk_res == 0)) {
5398 * Can't make it unwritten. There isn't
5399 * a full extent here so just skip it.
5401 ASSERT(end >= del.br_blockcount);
5402 end -= del.br_blockcount;
5403 if (got.br_startoff > end &&
5404 !xfs_iext_prev_extent(ifp, &icur, &got)) {
5409 } else if (del.br_state == XFS_EXT_UNWRITTEN) {
5410 struct xfs_bmbt_irec prev;
5413 * This one is already unwritten.
5414 * It must have a written left neighbor.
5415 * Unwrite the killed part of that one and try to save the rest for later.
5418 if (!xfs_iext_prev_extent(ifp, &icur, &prev))
5420 ASSERT(prev.br_state == XFS_EXT_NORM);
5421 ASSERT(!isnullstartblock(prev.br_startblock));
5422 ASSERT(del.br_startblock ==
5423 prev.br_startblock + prev.br_blockcount);
5424 if (prev.br_startoff < start) {
5425 mod = start - prev.br_startoff;
5426 prev.br_blockcount -= mod;
5427 prev.br_startblock += mod;
5428 prev.br_startoff = start;
5430 prev.br_state = XFS_EXT_UNWRITTEN;
5431 error = xfs_bmap_add_extent_unwritten_real(tp,
5432 ip, whichfork, &icur, &cur,
5438 ASSERT(del.br_state == XFS_EXT_NORM);
5439 del.br_state = XFS_EXT_UNWRITTEN;
5440 error = xfs_bmap_add_extent_unwritten_real(tp,
5441 ip, whichfork, &icur, &cur,
5451 error = xfs_bmap_del_extent_delay(ip, whichfork, &icur,
5454 error = xfs_bmap_del_extent_real(ip, tp, &icur, cur,
5455 &del, &tmp_logflags, whichfork,
5457 logflags |= tmp_logflags;
5463 max_len -= del.br_blockcount;
5464 end = del.br_startoff - 1;
5467 * If not done go on to the next (previous) record.
5469 if (end != (xfs_fileoff_t)-1 && end >= start) {
5470 if (!xfs_iext_get_extent(ifp, &icur, &got) ||
5471 (got.br_startoff > end &&
5472 !xfs_iext_prev_extent(ifp, &icur, &got))) {
5479 if (done || end == (xfs_fileoff_t)-1 || end < start)
5482 *rlen = end - start + 1;
5485 * Convert to a btree if necessary.
5487 if (xfs_bmap_needs_btree(ip, whichfork)) {
5488 ASSERT(cur == NULL);
5489 error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
5490 &tmp_logflags, whichfork);
5491 logflags |= tmp_logflags;
5493 error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags,
5499 * Log everything. Do this after conversion; there's no point in
5500 * logging the extent records if we've converted to btree format.
5502 if ((logflags & xfs_ilog_fext(whichfork)) &&
5503 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
5504 logflags &= ~xfs_ilog_fext(whichfork);
5505 else if ((logflags & xfs_ilog_fbroot(whichfork)) &&
5506 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
5507 logflags &= ~xfs_ilog_fbroot(whichfork);
5509 * Log inode even in the error case; if the transaction
5510 * is dirty we'll need to shut down the filesystem.
5513 xfs_trans_log_inode(tp, ip, logflags);
5516 cur->bc_private.b.allocated = 0;
5517 xfs_btree_del_cursor(cur, error);
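/*
 * Illustrative sketch of a truncate-style caller (assumed names, not a
 * quote of any one caller): __xfs_bunmapi() is invoked repeatedly,
 * finishing deferred work between calls, until the remaining length hits
 * zero:
 *
 *	xfs_filblks_t	unmap_len = last_fsb - first_fsb;
 *
 *	while (unmap_len > 0) {
 *		error = __xfs_bunmapi(tp, ip, first_fsb, &unmap_len, 0,
 *				XFS_ITRUNC_MAX_EXTENTS);
 *		if (error)
 *			break;
 *		error = xfs_defer_finish(&tp);
 *		if (error)
 *			break;
 *	}
 */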
5522 /* Unmap a range of a file. */
5526 struct xfs_inode *ip,
5535 error = __xfs_bunmapi(tp, ip, bno, &len, flags, nexts);
5536 *done = (len == 0);
5537 return error;
5541 * Determine whether an extent shift can be accomplished by a merge with the
5542 * extent that precedes the target hole of the shift.
5546 struct xfs_bmbt_irec *left, /* preceding extent */
5547 struct xfs_bmbt_irec *got, /* current extent to shift */
5548 xfs_fileoff_t shift) /* shift fsb */
5550 xfs_fileoff_t startoff;
5552 startoff = got->br_startoff - shift;
5555 * The extent, once shifted, must be adjacent in-file and on-disk with
5556 * the preceding extent.
5558 if ((left->br_startoff + left->br_blockcount != startoff) ||
5559 (left->br_startblock + left->br_blockcount != got->br_startblock) ||
5560 (left->br_state != got->br_state) ||
5561 (left->br_blockcount + got->br_blockcount > MAXEXTLEN))
5568 * A bmap extent shift adjusts the file offset of an extent to fill a preceding
5569 * hole in the file. If an extent shift would result in the extent being fully
5570 * adjacent to the extent that currently precedes the hole, we can merge with
5571 * the preceding extent rather than do the shift.
5573 * This function assumes the caller has verified a shift-by-merge is possible
5574 * with the provided extents via xfs_bmse_can_merge().
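 *
 * For example (made-up values): left = [startoff 0, startblock 100, len 10]
 * and got = [startoff 15, startblock 110, len 5] shifted by 5 lands got at
 * startoff 10, contiguous with left both in-file and on-disk, so the two
 * records collapse into [startoff 0, startblock 100, len 15].
 */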
5578 struct xfs_trans *tp,
5579 struct xfs_inode *ip,
5581 xfs_fileoff_t shift, /* shift fsb */
5582 struct xfs_iext_cursor *icur,
5583 struct xfs_bmbt_irec *got, /* extent to shift */
5584 struct xfs_bmbt_irec *left, /* preceding extent */
5585 struct xfs_btree_cur *cur,
5586 int *logflags) /* output */
5588 struct xfs_bmbt_irec new;
5589 xfs_filblks_t blockcount;
5591 struct xfs_mount *mp = ip->i_mount;
5593 blockcount = left->br_blockcount + got->br_blockcount;
5595 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
5596 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
5597 ASSERT(xfs_bmse_can_merge(left, got, shift));
5600 new.br_blockcount = blockcount;
5603 * Update the on-disk extent count, the btree if necessary, and log the inode.
5606 XFS_IFORK_NEXT_SET(ip, whichfork,
5607 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
5608 *logflags |= XFS_ILOG_CORE;
5610 *logflags |= XFS_ILOG_DEXT;
5614 /* lookup and remove the extent to merge */
5615 error = xfs_bmbt_lookup_eq(cur, got, &i);
5618 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
5620 error = xfs_btree_delete(cur, &i);
5623 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
5625 /* lookup and update size of the previous extent */
5626 error = xfs_bmbt_lookup_eq(cur, left, &i);
5629 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
5631 error = xfs_bmbt_update(cur, &new);
5635 /* change to extent format if required after extent removal */
5636 error = xfs_bmap_btree_to_extents(tp, ip, cur, logflags, whichfork);
5641 xfs_iext_remove(ip, icur, 0);
5642 xfs_iext_prev(XFS_IFORK_PTR(ip, whichfork), icur);
5643 xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
5646 /* update reverse mapping. rmap functions merge the rmaps for us */
5647 xfs_rmap_unmap_extent(tp, ip, whichfork, got);
5648 memcpy(&new, got, sizeof(new));
5649 new.br_startoff = left->br_startoff + left->br_blockcount;
5650 xfs_rmap_map_extent(tp, ip, whichfork, &new);
5655 xfs_bmap_shift_update_extent(
5656 struct xfs_trans *tp,
5657 struct xfs_inode *ip,
5659 struct xfs_iext_cursor *icur,
5660 struct xfs_bmbt_irec *got,
5661 struct xfs_btree_cur *cur,
5663 xfs_fileoff_t startoff)
5665 struct xfs_mount *mp = ip->i_mount;
5666 struct xfs_bmbt_irec prev = *got;
5669 *logflags |= XFS_ILOG_CORE;
5671 got->br_startoff = startoff;
5674 error = xfs_bmbt_lookup_eq(cur, &prev, &i);
5677 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
5679 error = xfs_bmbt_update(cur, got);
5683 *logflags |= XFS_ILOG_DEXT;
5686 xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
5689 /* update reverse mapping */
5690 xfs_rmap_unmap_extent(tp, ip, whichfork, &prev);
5691 xfs_rmap_map_extent(tp, ip, whichfork, got);
5696 xfs_bmap_collapse_extents(
5697 struct xfs_trans *tp,
5698 struct xfs_inode *ip,
5699 xfs_fileoff_t *next_fsb,
5700 xfs_fileoff_t offset_shift_fsb,
5703 int whichfork = XFS_DATA_FORK;
5704 struct xfs_mount *mp = ip->i_mount;
5705 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
5706 struct xfs_btree_cur *cur = NULL;
5707 struct xfs_bmbt_irec got, prev;
5708 struct xfs_iext_cursor icur;
5709 xfs_fileoff_t new_startoff;
5713 if (unlikely(XFS_TEST_ERROR(
5714 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
5715 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
5716 mp, XFS_ERRTAG_BMAPIFORMAT))) {
5717 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
5718 return -EFSCORRUPTED;
5721 if (XFS_FORCED_SHUTDOWN(mp))
5724 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL));
5726 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
5727 error = xfs_iread_extents(tp, ip, whichfork);
5732 if (ifp->if_flags & XFS_IFBROOT) {
5733 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5734 cur->bc_private.b.flags = 0;
5737 if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
5741 XFS_WANT_CORRUPTED_GOTO(mp, !isnullstartblock(got.br_startblock),
5744 new_startoff = got.br_startoff - offset_shift_fsb;
5745 if (xfs_iext_peek_prev_extent(ifp, &icur, &prev)) {
5746 if (new_startoff < prev.br_startoff + prev.br_blockcount) {
5751 if (xfs_bmse_can_merge(&prev, &got, offset_shift_fsb)) {
5752 error = xfs_bmse_merge(tp, ip, whichfork,
5753 offset_shift_fsb, &icur, &got, &prev,
5760 if (got.br_startoff < offset_shift_fsb) {
5766 error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
5767 cur, &logflags, new_startoff);
5772 if (!xfs_iext_next_extent(ifp, &icur, &got)) {
5777 *next_fsb = got.br_startoff;
5780 xfs_btree_del_cursor(cur, error);
5782 xfs_trans_log_inode(tp, ip, logflags);
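/*
 * Usage note: the collapse-range path is expected to drive this in a loop,
 * one transaction per iteration, feeding *next_fsb back in until *done is
 * set. A hedged sketch of that outer loop:
 *
 *	while (!done) {
 *		... allocate a transaction and join the inode ...
 *		error = xfs_bmap_collapse_extents(tp, ip, &next_fsb,
 *				shift_fsb, &done);
 *		if (error)
 *			goto out_trans_cancel;
 *		error = xfs_trans_commit(tp);
 *	}
 */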
5786 /* Make sure we won't be right-shifting an extent past the maximum bound. */
5788 xfs_bmap_can_insert_extents(
5789 struct xfs_inode *ip,
5791 xfs_fileoff_t shift)
5793 struct xfs_bmbt_irec got;
5797 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
5799 if (XFS_FORCED_SHUTDOWN(ip->i_mount))
5802 xfs_ilock(ip, XFS_ILOCK_EXCL);
5803 error = xfs_bmap_last_extent(NULL, ip, XFS_DATA_FORK, &got, &is_empty);
5804 if (!error && !is_empty && got.br_startoff >= off &&
5805 ((got.br_startoff + shift) & BMBT_STARTOFF_MASK) < got.br_startoff)
5807 xfs_iunlock(ip, XFS_ILOCK_EXCL);
5813 xfs_bmap_insert_extents(
5814 struct xfs_trans *tp,
5815 struct xfs_inode *ip,
5816 xfs_fileoff_t *next_fsb,
5817 xfs_fileoff_t offset_shift_fsb,
5819 xfs_fileoff_t stop_fsb)
5821 int whichfork = XFS_DATA_FORK;
5822 struct xfs_mount *mp = ip->i_mount;
5823 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
5824 struct xfs_btree_cur *cur = NULL;
5825 struct xfs_bmbt_irec got, next;
5826 struct xfs_iext_cursor icur;
5827 xfs_fileoff_t new_startoff;
5831 if (unlikely(XFS_TEST_ERROR(
5832 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
5833 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
5834 mp, XFS_ERRTAG_BMAPIFORMAT))) {
5835 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
5836 return -EFSCORRUPTED;
5839 if (XFS_FORCED_SHUTDOWN(mp))
5842 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL));
5844 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
5845 error = xfs_iread_extents(tp, ip, whichfork);
5850 if (ifp->if_flags & XFS_IFBROOT) {
5851 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5852 cur->bc_private.b.flags = 0;
5855 if (*next_fsb == NULLFSBLOCK) {
5856 xfs_iext_last(ifp, &icur);
5857 if (!xfs_iext_get_extent(ifp, &icur, &got) ||
5858 stop_fsb > got.br_startoff) {
5863 if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
5868 XFS_WANT_CORRUPTED_GOTO(mp, !isnullstartblock(got.br_startblock),
5871 if (stop_fsb >= got.br_startoff + got.br_blockcount) {
5876 new_startoff = got.br_startoff + offset_shift_fsb;
5877 if (xfs_iext_peek_next_extent(ifp, &icur, &next)) {
5878 if (new_startoff + got.br_blockcount > next.br_startoff) {
5884 * Unlike a left shift (which involves a hole punch), a right
5885 * shift does not modify extent neighbors in any way. We should
5886 * never find mergeable extents in this scenario. Check anyway
5887 * and warn if we encounter two extents that could be one.
5889 if (xfs_bmse_can_merge(&got, &next, offset_shift_fsb))
5893 error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
5894 cur, &logflags, new_startoff);
5898 if (!xfs_iext_prev_extent(ifp, &icur, &got) ||
5899 stop_fsb >= got.br_startoff + got.br_blockcount) {
5904 *next_fsb = got.br_startoff;
5907 xfs_btree_del_cursor(cur, error);
5909 xfs_trans_log_inode(tp, ip, logflags);
5914 * Splits an extent into two extents at split_fsb such that split_fsb becomes
5915 * the first block of the new (second) extent. If split_fsb lies in a hole or
5916 * at the first block of an existing extent, there is nothing to split, so
5917 * just return 0.
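 *
 * For example (made-up values): splitting [startoff 10, startblock 200,
 * len 20] at split_fsb 17 gives gotblkcnt = 7; the original record is
 * trimmed to [10, 200, 7] and a new record [17, 207, 13] is inserted
 * right after it.
 */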
5920 xfs_bmap_split_extent_at(
5921 struct xfs_trans *tp,
5922 struct xfs_inode *ip,
5923 xfs_fileoff_t split_fsb)
5925 int whichfork = XFS_DATA_FORK;
5926 struct xfs_btree_cur *cur = NULL;
5927 struct xfs_bmbt_irec got;
5928 struct xfs_bmbt_irec new; /* split extent */
5929 struct xfs_mount *mp = ip->i_mount;
5930 struct xfs_ifork *ifp;
5931 xfs_fsblock_t gotblkcnt; /* new block count for got */
5932 struct xfs_iext_cursor icur;
5937 if (unlikely(XFS_TEST_ERROR(
5938 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
5939 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
5940 mp, XFS_ERRTAG_BMAPIFORMAT))) {
5941 XFS_ERROR_REPORT("xfs_bmap_split_extent_at",
5942 XFS_ERRLEVEL_LOW, mp);
5943 return -EFSCORRUPTED;
5946 if (XFS_FORCED_SHUTDOWN(mp))
5949 ifp = XFS_IFORK_PTR(ip, whichfork);
5950 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
5951 /* Read in all the extents */
5952 error = xfs_iread_extents(tp, ip, whichfork);
5958 * If there are no extents, or split_fsb lies in a hole, we are done.
5960 if (!xfs_iext_lookup_extent(ip, ifp, split_fsb, &icur, &got) ||
5961 got.br_startoff >= split_fsb)
5964 gotblkcnt = split_fsb - got.br_startoff;
5965 new.br_startoff = split_fsb;
5966 new.br_startblock = got.br_startblock + gotblkcnt;
5967 new.br_blockcount = got.br_blockcount - gotblkcnt;
5968 new.br_state = got.br_state;
5970 if (ifp->if_flags & XFS_IFBROOT) {
5971 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5972 cur->bc_private.b.flags = 0;
5973 error = xfs_bmbt_lookup_eq(cur, &got, &i);
5976 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
5979 got.br_blockcount = gotblkcnt;
5980 xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), &icur,
5983 logflags = XFS_ILOG_CORE;
5985 error = xfs_bmbt_update(cur, &got);
5989 logflags |= XFS_ILOG_DEXT;
5991 /* Add new extent */
5992 xfs_iext_next(ifp, &icur);
5993 xfs_iext_insert(ip, &icur, &new, 0);
5994 XFS_IFORK_NEXT_SET(ip, whichfork,
5995 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
5998 error = xfs_bmbt_lookup_eq(cur, &new, &i);
6001 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, del_cursor);
6002 error = xfs_btree_insert(cur, &i);
6005 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
6009 * Convert to a btree if necessary.
6011 if (xfs_bmap_needs_btree(ip, whichfork)) {
6012 int tmp_logflags; /* partial log flag return val */
6014 ASSERT(cur == NULL);
6015 error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
6016 &tmp_logflags, whichfork);
6017 logflags |= tmp_logflags;
6022 cur->bc_private.b.allocated = 0;
6023 xfs_btree_del_cursor(cur, error);
6027 xfs_trans_log_inode(tp, ip, logflags);
6032 xfs_bmap_split_extent(
6033 struct xfs_inode *ip,
6034 xfs_fileoff_t split_fsb)
6036 struct xfs_mount *mp = ip->i_mount;
6037 struct xfs_trans *tp;
6040 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
6041 XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
6045 xfs_ilock(ip, XFS_ILOCK_EXCL);
6046 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
6048 error = xfs_bmap_split_extent_at(tp, ip, split_fsb);
6052 return xfs_trans_commit(tp);
6055 xfs_trans_cancel(tp);
6059 /* Deferred mapping is only for real extents in the data fork. */
6061 xfs_bmap_is_update_needed(
6062 struct xfs_bmbt_irec *bmap)
6064 return bmap->br_startblock != HOLESTARTBLOCK &&
6065 bmap->br_startblock != DELAYSTARTBLOCK;
6068 /* Record a bmap intent. */
6071 struct xfs_trans *tp,
6072 enum xfs_bmap_intent_type type,
6073 struct xfs_inode *ip,
6075 struct xfs_bmbt_irec *bmap)
6077 struct xfs_bmap_intent *bi;
6079 trace_xfs_bmap_defer(tp->t_mountp,
6080 XFS_FSB_TO_AGNO(tp->t_mountp, bmap->br_startblock),
6082 XFS_FSB_TO_AGBNO(tp->t_mountp, bmap->br_startblock),
6083 ip->i_ino, whichfork,
6085 bmap->br_blockcount,
6088 bi = kmem_alloc(sizeof(struct xfs_bmap_intent), KM_NOFS);
6089 INIT_LIST_HEAD(&bi->bi_list);
6092 bi->bi_whichfork = whichfork;
6093 bi->bi_bmap = *bmap;
6095 xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_BMAP, &bi->bi_list);
6099 /* Map an extent into a file. */
6101 xfs_bmap_map_extent(
6102 struct xfs_trans *tp,
6103 struct xfs_inode *ip,
6104 struct xfs_bmbt_irec *PREV)
6106 if (!xfs_bmap_is_update_needed(PREV))
6109 __xfs_bmap_add(tp, XFS_BMAP_MAP, ip, XFS_DATA_FORK, PREV);
6112 /* Unmap an extent out of a file. */
6114 xfs_bmap_unmap_extent(
6115 struct xfs_trans *tp,
6116 struct xfs_inode *ip,
6117 struct xfs_bmbt_irec *PREV)
6119 if (!xfs_bmap_is_update_needed(PREV))
6122 __xfs_bmap_add(tp, XFS_BMAP_UNMAP, ip, XFS_DATA_FORK, PREV);
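/*
 * Note that xfs_bmap_map_extent() and xfs_bmap_unmap_extent() only queue
 * an intent on the transaction; the actual work is done later when the
 * deferred-ops machinery calls xfs_bmap_finish_one() below. A hedged
 * sketch of a caller:
 *
 *	xfs_bmap_unmap_extent(tp, ip, &irec);
 *	error = xfs_defer_finish(&tp);
 */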
6126 * Process one of the deferred bmap operations: replay a single map or
6127 * unmap intent against the inode's data fork.
6130 xfs_bmap_finish_one(
6131 struct xfs_trans *tp,
6132 struct xfs_inode *ip,
6133 enum xfs_bmap_intent_type type,
6135 xfs_fileoff_t startoff,
6136 xfs_fsblock_t startblock,
6137 xfs_filblks_t *blockcount,
6142 ASSERT(tp->t_firstblock == NULLFSBLOCK);
6144 trace_xfs_bmap_deferred(tp->t_mountp,
6145 XFS_FSB_TO_AGNO(tp->t_mountp, startblock), type,
6146 XFS_FSB_TO_AGBNO(tp->t_mountp, startblock),
6147 ip->i_ino, whichfork, startoff, *blockcount, state);
6149 if (WARN_ON_ONCE(whichfork != XFS_DATA_FORK))
6150 return -EFSCORRUPTED;
6152 if (XFS_TEST_ERROR(false, tp->t_mountp,
6153 XFS_ERRTAG_BMAP_FINISH_ONE))
6158 error = xfs_bmapi_remap(tp, ip, startoff, *blockcount,
6162 case XFS_BMAP_UNMAP:
6163 error = __xfs_bunmapi(tp, ip, startoff, blockcount,
6164 XFS_BMAPI_REMAP, 1);
6168 error = -EFSCORRUPTED;
6174 /* Check that an inode's extent does not have invalid flags or bad ranges. */
6176 xfs_bmap_validate_extent(
6177 struct xfs_inode *ip,
6179 struct xfs_bmbt_irec *irec)
6181 struct xfs_mount *mp = ip->i_mount;
6182 xfs_fsblock_t endfsb;
6185 isrt = XFS_IS_REALTIME_INODE(ip);
6186 endfsb = irec->br_startblock + irec->br_blockcount - 1;
6188 if (!xfs_verify_rtbno(mp, irec->br_startblock))
6189 return __this_address;
6190 if (!xfs_verify_rtbno(mp, endfsb))
6191 return __this_address;
6193 if (!xfs_verify_fsbno(mp, irec->br_startblock))
6194 return __this_address;
6195 if (!xfs_verify_fsbno(mp, endfsb))
6196 return __this_address;
6197 if (XFS_FSB_TO_AGNO(mp, irec->br_startblock) !=
6198 XFS_FSB_TO_AGNO(mp, endfsb))
6199 return __this_address;
6201 if (irec->br_state != XFS_EXT_NORM && whichfork != XFS_DATA_FORK)
6202 return __this_address;