// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_dir2.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_buf_item.h"
#include "xfs_trace.h"
#include "xfs_attr_leaf.h"
#include "xfs_filestream.h"
#include "xfs_rmap.h"
#include "xfs_ag_resv.h"
#include "xfs_refcount.h"
#include "xfs_icache.h"
#include "xfs_iomap.h"

kmem_zone_t		*xfs_bmap_free_item_zone;

/*
 * Miscellaneous helper functions
 */
/*
 * Compute and fill in the value of the maximum depth of a bmap btree
 * in this filesystem.  Done once, during mount.
 */
void
xfs_bmap_compute_maxlevels(
	xfs_mount_t	*mp,		/* file system mount structure */
	int		whichfork)	/* data or attr fork */
{
	int		level;		/* btree level */
	uint		maxblocks;	/* max blocks at this level */
	uint		maxleafents;	/* max leaf entries possible */
	int		maxrootrecs;	/* max records in root block */
	int		minleafrecs;	/* min records in leaf block */
	int		minnoderecs;	/* min records in node block */
	int		sz;		/* root block size */

	/*
	 * The maximum number of extents in a file, hence the maximum
	 * number of leaf entries, is controlled by the type of di_nextents
	 * (a signed 32-bit number, xfs_extnum_t), or by di_anextents
	 * (a signed 16-bit number, xfs_aextnum_t).
	 *
	 * Note that we can no longer assume that if we are in ATTR1 that
	 * the fork offset of all the inodes will be
	 * (xfs_default_attroffset(ip) >> 3) because we could have mounted
	 * with ATTR2 and then mounted back with ATTR1, keeping the
	 * di_forkoff's fixed but probably at various positions. Therefore,
	 * for both ATTR1 and ATTR2 we have to assume the worst case scenario
	 * of a minimum size available.
	 */
	if (whichfork == XFS_DATA_FORK) {
		maxleafents = MAXEXTNUM;
		sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
	} else {
		maxleafents = MAXAEXTNUM;
		sz = XFS_BMDR_SPACE_CALC(MINABTPTRS);
	}
	maxrootrecs = xfs_bmdr_maxrecs(sz, 0);
	minleafrecs = mp->m_bmap_dmnr[0];
	minnoderecs = mp->m_bmap_dmnr[1];
	maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
	for (level = 1; maxblocks > 1; level++) {
		if (maxblocks <= maxrootrecs)
			maxblocks = 1;
		else
			maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
	}
	mp->m_bm_maxlevels[whichfork] = level;
}
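
/*
 * Worked example (illustrative numbers only, not from any particular
 * filesystem geometry): suppose minleafrecs = 125, minnoderecs = 250 and
 * maxrootrecs = 9 for the data fork.  MAXEXTNUM (2^31 - 1) leaf entries
 * need ceil((2^31 - 1) / 125) = 17179870 leaf blocks; the loop then
 * shrinks that to 68720, 275 and finally 2 node blocks.  Since
 * 2 <= maxrootrecs, the last level collapses into the inode root and the
 * loop exits with level == 5, so m_bm_maxlevels[XFS_DATA_FORK] = 5.
 */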
STATIC int				/* error */
xfs_bmbt_lookup_eq(
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*irec,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b = *irec;
	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}
STATIC int				/* error */
xfs_bmbt_lookup_first(
	struct xfs_btree_cur	*cur,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b.br_startoff = 0;
	cur->bc_rec.b.br_startblock = 0;
	cur->bc_rec.b.br_blockcount = 0;
	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
}
/*
 * Check if the inode needs to be converted to btree format.
 */
static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
{
	return whichfork != XFS_COW_FORK &&
		XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
		XFS_IFORK_NEXTENTS(ip, whichfork) >
			XFS_IFORK_MAXEXT(ip, whichfork);
}
/*
 * Check if the inode should be converted to extent format.
 */
static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
{
	return whichfork != XFS_COW_FORK &&
		XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
		XFS_IFORK_NEXTENTS(ip, whichfork) <=
			XFS_IFORK_MAXEXT(ip, whichfork);
}
/*
 * Update the record referred to by cur to the value given by irec.
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int
xfs_bmbt_update(
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*irec)
{
	union xfs_btree_rec	rec;

	xfs_bmbt_disk_set_all(&rec.bmbt, irec);
	return xfs_btree_update(cur, &rec);
}
/*
 * Compute the worst-case number of indirect blocks that will be used
 * for ip's delayed extent of length "len".
 */
STATIC xfs_filblks_t
xfs_bmap_worst_indlen(
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_filblks_t	len)		/* delayed extent length */
{
	int		level;		/* btree level number */
	int		maxrecs;	/* maximum record count at this level */
	xfs_mount_t	*mp;		/* mount structure */
	xfs_filblks_t	rval;		/* return value */

	mp = ip->i_mount;
	maxrecs = mp->m_bmap_dmxr[0];
	for (level = 0, rval = 0;
	     level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
	     level++) {
		len += maxrecs - 1;
		do_div(len, maxrecs);
		rval += len;
		if (len == 1)
			return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
				level - 1;
		if (level == 0)
			maxrecs = mp->m_bmap_dmxr[1];
	}
	return rval;
}
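
/*
 * Worked example (hypothetical record counts): with m_bmap_dmxr[0] = 125,
 * m_bmap_dmxr[1] = 250 and five bmap btree levels, a 1000-block delayed
 * extent needs ceil(1000 / 125) = 8 leaf blocks, then ceil(8 / 250) = 1
 * node block; the early return then charges one block for each remaining
 * level, giving 8 + 1 + (5 - 1 - 1) = 12 worst-case indirect blocks.
 */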
/*
 * Calculate the default attribute fork offset for newly created inodes.
 */
uint
xfs_default_attroffset(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint			offset;

	if (mp->m_sb.sb_inodesize == 256) {
		offset = XFS_LITINO(mp, ip->i_d.di_version) -
				XFS_BMDR_SPACE_CALC(MINABTPTRS);
	} else {
		offset = XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
	}

	ASSERT(offset < XFS_LITINO(mp, ip->i_d.di_version));
	return offset;
}
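
/*
 * Illustrative arithmetic (assuming the usual on-disk sizes, where
 * XFS_BMDR_SPACE_CALC(n) is a 4-byte block header plus 16 bytes per
 * key/pointer pair and MINABTPTRS is 2): inodes larger than 256 bytes
 * get a fixed offset of 4 + 12 * 16 = 196 bytes into the literal area,
 * while 256-byte inodes place the attr fork as high as possible,
 * leaving only a minimal 36-byte attr btree root above the fork offset.
 */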
/*
 * Helper routine to reset inode di_forkoff field when switching
 * attribute fork from local to extent format - we reset it where
 * possible to make space available for inline data fork extents.
 */
STATIC void
xfs_bmap_forkoff_reset(
	xfs_inode_t	*ip,
	int		whichfork)
{
	if (whichfork == XFS_ATTR_FORK &&
	    ip->i_d.di_format != XFS_DINODE_FMT_DEV &&
	    ip->i_d.di_format != XFS_DINODE_FMT_BTREE) {
		uint	dfl_forkoff = xfs_default_attroffset(ip) >> 3;

		if (dfl_forkoff > ip->i_d.di_forkoff)
			ip->i_d.di_forkoff = dfl_forkoff;
	}
}
#ifdef DEBUG
STATIC struct xfs_buf *
xfs_bmap_get_bp(
	struct xfs_btree_cur	*cur,
	xfs_fsblock_t		bno)
{
	struct xfs_log_item	*lip;
	int			i;

	if (!cur)
		return NULL;

	for (i = 0; i < XFS_BTREE_MAXLEVELS; i++) {
		if (!cur->bc_bufs[i])
			break;
		if (XFS_BUF_ADDR(cur->bc_bufs[i]) == bno)
			return cur->bc_bufs[i];
	}

	/* Chase down all the log items to see if the bp is there */
	list_for_each_entry(lip, &cur->bc_tp->t_items, li_trans) {
		struct xfs_buf_log_item	*bip = (struct xfs_buf_log_item *)lip;

		if (bip->bli_item.li_type == XFS_LI_BUF &&
		    XFS_BUF_ADDR(bip->bli_buf) == bno)
			return bip->bli_buf;
	}

	return NULL;
}
STATIC void
xfs_check_block(
	struct xfs_btree_block	*block,
	xfs_mount_t		*mp,
	int			root,
	short			sz)
{
	int			i, j, dmxr;
	__be64			*pp, *thispa;	/* pointer to block address */
	xfs_bmbt_key_t		*prevp, *keyp;

	ASSERT(be16_to_cpu(block->bb_level) > 0);

	prevp = NULL;
	for (i = 1; i <= xfs_btree_get_numrecs(block); i++) {
		dmxr = mp->m_bmap_dmxr[0];
		keyp = XFS_BMBT_KEY_ADDR(mp, block, i);

		if (prevp) {
			ASSERT(be64_to_cpu(prevp->br_startoff) <
			       be64_to_cpu(keyp->br_startoff));
		}
		prevp = keyp;

		/*
		 * Compare the block numbers to see if there are dups.
		 */
		if (root)
			pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz);
		else
			pp = XFS_BMBT_PTR_ADDR(mp, block, i, dmxr);

		for (j = i + 1; j <= be16_to_cpu(block->bb_numrecs); j++) {
			if (root)
				thispa = XFS_BMAP_BROOT_PTR_ADDR(mp, block, j, sz);
			else
				thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr);
			if (*thispa == *pp) {
				xfs_warn(mp, "%s: thispa(%d) == pp(%d) %Ld",
					__func__, j, i,
					(unsigned long long)be64_to_cpu(*thispa));
				xfs_err(mp, "%s: ptrs are equal in node\n",
					__func__);
				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
			}
		}
	}
}
/*
 * Check that the extents for the inode ip are in the right order in all
 * btree leaves. This becomes prohibitively expensive for large extent count
 * files, so don't bother with inodes that have more than 10,000 extents in
 * them. The btree record ordering checks will still be done, so for such large
 * bmapbt constructs that is going to catch most corruptions.
 */
STATIC void
xfs_bmap_check_leaf_extents(
	xfs_btree_cur_t		*cur,	/* btree cursor or null */
	xfs_inode_t		*ip,	/* incore inode pointer */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_btree_block	*block;	/* current btree block */
	xfs_fsblock_t		bno;	/* block # of "block" */
	xfs_buf_t		*bp;	/* buffer for "block" */
	int			error;	/* error return value */
	xfs_extnum_t		i = 0, j;	/* index into the extents list */
	struct xfs_ifork	*ifp;	/* fork structure */
	int			level;	/* btree level, for checking */
	xfs_mount_t		*mp;	/* file system mount structure */
	__be64			*pp;	/* pointer to block address */
	xfs_bmbt_rec_t		*ep;	/* pointer to current extent */
	xfs_bmbt_rec_t		last = {0, 0}; /* last extent in prev block */
	xfs_bmbt_rec_t		*nextp;	/* pointer to next extent */
	int			bp_release = 0;

	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
		return;

	/* skip large extent count inodes */
	if (ip->i_d.di_nextents > 10000)
		return;

	bno = NULLFSBLOCK;
	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	block = ifp->if_broot;
	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);

	ASSERT(bno != NULLFSBLOCK);
	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

	/*
	 * Go down the tree until leaf level is reached, following the first
	 * pointer (leftmost) at each level.
	 */
	while (level-- > 0) {
		/* See if buf is in cur first */
		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_btree_read_bufl(mp, NULL, bno, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
		if (level == 0)
			break;

		/*
		 * Check this block for basic sanity (increasing keys and
		 * no duplicate blocks).
		 */
		xfs_check_block(block, mp, 0, 0);
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		if (XFS_IS_CORRUPT(mp, !xfs_verify_fsbno(mp, bno))) {
			error = -EFSCORRUPTED;
			goto error0;
		}
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
	}

	/*
	 * Here with bp and block set to the leftmost leaf node in the tree.
	 */
	i = 0;

	/*
	 * Loop over all leaf nodes checking that all extents are in the right order.
	 */
	for (;;) {
		xfs_fsblock_t	nextbno;
		xfs_extnum_t	num_recs;

		num_recs = xfs_btree_get_numrecs(block);

		/*
		 * Read-ahead the next leaf block, if any.
		 */
		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);

		/*
		 * Check all the extents to make sure they are OK.
		 * If we had a previous block, the last entry should
		 * conform with the first entry in this one.
		 */
		ep = XFS_BMBT_REC_ADDR(mp, block, 1);
		if (i) {
			ASSERT(xfs_bmbt_disk_get_startoff(&last) +
			       xfs_bmbt_disk_get_blockcount(&last) <=
			       xfs_bmbt_disk_get_startoff(ep));
		}
		for (j = 1; j < num_recs; j++) {
			nextp = XFS_BMBT_REC_ADDR(mp, block, j + 1);
			ASSERT(xfs_bmbt_disk_get_startoff(ep) +
			       xfs_bmbt_disk_get_blockcount(ep) <=
			       xfs_bmbt_disk_get_startoff(nextp));
			ep = nextp;
		}

		last = *ep;
		i += num_recs;
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
		bno = nextbno;
		/*
		 * If we've reached the end, stop.
		 */
		if (bno == NULLFSBLOCK)
			break;

		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_btree_read_bufl(mp, NULL, bno, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
	}

	return;

error0:
	xfs_warn(mp, "%s: at error0", __func__);
	if (bp_release)
		xfs_trans_brelse(NULL, bp);
error_norelse:
	xfs_warn(mp, "%s: BAD after btree leaves for %d extents",
		__func__, i);
	xfs_err(mp, "%s: CORRUPTED BTREE OR SOMETHING", __func__);
	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	return;
}
/*
 * Validate that the bmbt_irecs being returned from bmapi are valid
 * given the caller's original parameters.  Specifically check the
 * ranges of the returned irecs to ensure that they only extend beyond
 * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
 */
STATIC void
xfs_bmap_validate_ret(
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	int			flags,
	xfs_bmbt_irec_t		*mval,
	int			nmap,
	int			ret_nmap)
{
	int			i;		/* index to map values */

	ASSERT(ret_nmap <= nmap);

	for (i = 0; i < ret_nmap; i++) {
		ASSERT(mval[i].br_blockcount > 0);
		if (!(flags & XFS_BMAPI_ENTIRE)) {
			ASSERT(mval[i].br_startoff >= bno);
			ASSERT(mval[i].br_blockcount <= len);
			ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
			       bno + len);
		} else {
			ASSERT(mval[i].br_startoff < bno + len);
			ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
			       bno);
		}
		ASSERT(i == 0 ||
		       mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
		       mval[i].br_startoff);
		ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
		       mval[i].br_startblock != HOLESTARTBLOCK);
		ASSERT(mval[i].br_state == XFS_EXT_NORM ||
		       mval[i].br_state == XFS_EXT_UNWRITTEN);
	}
}
#else
#define xfs_bmap_check_leaf_extents(cur, ip, whichfork)		do { } while (0)
#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)	do { } while (0)
#endif /* DEBUG */
/*
 * bmap free list manipulation functions
 */
/*
 * Add the extent to the list of extents to be freed at transaction end.
 * The list is maintained sorted (by block number).
 */
void
__xfs_bmap_add_free(
	struct xfs_trans		*tp,
	xfs_fsblock_t			bno,
	xfs_filblks_t			len,
	const struct xfs_owner_info	*oinfo,
	bool				skip_discard)
{
	struct xfs_extent_free_item	*new;		/* new element */
#ifdef DEBUG
	struct xfs_mount		*mp = tp->t_mountp;
	xfs_agnumber_t			agno;
	xfs_agblock_t			agbno;

	ASSERT(bno != NULLFSBLOCK);
	ASSERT(len > 0);
	ASSERT(len <= MAXEXTLEN);
	ASSERT(!isnullstartblock(bno));
	agno = XFS_FSB_TO_AGNO(mp, bno);
	agbno = XFS_FSB_TO_AGBNO(mp, bno);
	ASSERT(agno < mp->m_sb.sb_agcount);
	ASSERT(agbno < mp->m_sb.sb_agblocks);
	ASSERT(len < mp->m_sb.sb_agblocks);
	ASSERT(agbno + len <= mp->m_sb.sb_agblocks);
#endif
	ASSERT(xfs_bmap_free_item_zone != NULL);

	new = kmem_zone_alloc(xfs_bmap_free_item_zone, 0);
	new->xefi_startblock = bno;
	new->xefi_blockcount = (xfs_extlen_t)len;
	if (oinfo)
		new->xefi_oinfo = *oinfo;
	else
		new->xefi_oinfo = XFS_RMAP_OINFO_SKIP_UPDATE;
	new->xefi_skip_discard = skip_discard;
	trace_xfs_bmap_free_defer(tp->t_mountp,
			XFS_FSB_TO_AGNO(tp->t_mountp, bno), 0,
			XFS_FSB_TO_AGBNO(tp->t_mountp, bno), len);
	xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_FREE, &new->xefi_list);
}
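
/*
 * Typical usage sketch, mirroring xfs_bmap_btree_to_extents() below: the
 * caller stamps an owner record and defers the free rather than freeing
 * the extent synchronously:
 *
 *	struct xfs_owner_info	oinfo;
 *
 *	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
 *	xfs_bmap_add_free(tp, cbno, 1, &oinfo);
 *
 * The deferred op is processed, and the extent actually freed, when the
 * transaction chain commits.
 */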
/*
 * Inode fork format manipulation functions
 */
/*
 * Convert the inode format to extent format if it currently is in btree format,
 * but the extent list is small enough that it fits into the extent format.
 *
 * Since the extents are already in-core, all we have to do is give up the space
 * for the btree root and pitch the leaf block.
 */
STATIC int				/* error */
xfs_bmap_btree_to_extents(
	struct xfs_trans	*tp,	/* transaction pointer */
	struct xfs_inode	*ip,	/* incore inode pointer */
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			*logflagsp, /* inode logging flags */
	int			whichfork)  /* data or attr fork */
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_btree_block	*rblock = ifp->if_broot;
	struct xfs_btree_block	*cblock;/* child btree block */
	xfs_fsblock_t		cbno;	/* child block number */
	xfs_buf_t		*cbp;	/* child block's buffer */
	int			error;	/* error return value */
	__be64			*pp;	/* ptr to block address */
	struct xfs_owner_info	oinfo;

	/* check if we actually need the extent format first: */
	if (!xfs_bmap_wants_extents(ip, whichfork))
		return 0;

	/* otherwise, go ahead and convert the format. */
	ASSERT(whichfork != XFS_COW_FORK);
	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
	ASSERT(be16_to_cpu(rblock->bb_level) == 1);
	ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
	ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1);

	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes);
	cbno = be64_to_cpu(*pp);
#ifdef DEBUG
	if (XFS_IS_CORRUPT(cur->bc_mp, !xfs_btree_check_lptr(cur, cbno, 1)))
		return -EFSCORRUPTED;
#endif
	error = xfs_btree_read_bufl(mp, tp, cbno, &cbp, XFS_BMAP_BTREE_REF,
				&xfs_bmbt_buf_ops);
	if (error)
		return error;
	cblock = XFS_BUF_TO_BLOCK(cbp);
	if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
		return error;
	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
	xfs_bmap_add_free(cur->bc_tp, cbno, 1, &oinfo);
	ip->i_d.di_nblocks--;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
	xfs_trans_binval(tp, cbp);
	if (cur->bc_bufs[0] == cbp)
		cur->bc_bufs[0] = NULL;
	xfs_iroot_realloc(ip, -1, whichfork);
	ASSERT(ifp->if_broot == NULL);
	ASSERT((ifp->if_flags & XFS_IFBROOT) == 0);
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
	*logflagsp |= XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
	return 0;
}
/*
 * Convert an extents-format file into a btree-format file.
 * The new file will have a root block (in the inode) and a single child block.
 */
STATIC int					/* error */
xfs_bmap_extents_to_btree(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode pointer */
	struct xfs_btree_cur	**curp,		/* cursor returned to caller */
	int			wasdel,		/* converting a delayed alloc */
	int			*logflagsp,	/* inode logging flags */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_btree_block	*ablock;	/* allocated (child) bt block */
	struct xfs_buf		*abp;		/* buffer for ablock */
	struct xfs_alloc_arg	args;		/* allocation arguments */
	struct xfs_bmbt_rec	*arp;		/* child record pointer */
	struct xfs_btree_block	*block;		/* btree root block */
	struct xfs_btree_cur	*cur;		/* bmap btree cursor */
	int			error;		/* error return value */
	struct xfs_ifork	*ifp;		/* inode fork pointer */
	struct xfs_bmbt_key	*kp;		/* root block key pointer */
	struct xfs_mount	*mp;		/* mount structure */
	xfs_bmbt_ptr_t		*pp;		/* root block address pointer */
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	rec;
	xfs_extnum_t		cnt = 0;

	mp = ip->i_mount;
	ASSERT(whichfork != XFS_COW_FORK);
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS);

	/*
	 * Make space in the inode incore. This needs to be undone if we fail
	 * to expand the root.
	 */
	xfs_iroot_realloc(ip, 1, whichfork);
	ifp->if_flags |= XFS_IFBROOT;

	/*
	 * Fill in the root.
	 */
	block = ifp->if_broot;
	xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL,
				 XFS_BTNUM_BMAP, 1, 1, ip->i_ino,
				 XFS_BTREE_LONG_PTRS);
	/*
	 * Need a cursor. Can't allocate until bb_level is filled in.
	 */
	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
	cur->bc_private.b.flags = wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
	/*
	 * Convert to a btree with two levels, one record in root.
	 */
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_BTREE);
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = mp;
	xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, whichfork);
	if (tp->t_firstblock == NULLFSBLOCK) {
		args.type = XFS_ALLOCTYPE_START_BNO;
		args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
	} else if (tp->t_flags & XFS_TRANS_LOWMODE) {
		args.type = XFS_ALLOCTYPE_START_BNO;
		args.fsbno = tp->t_firstblock;
	} else {
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
		args.fsbno = tp->t_firstblock;
	}
	args.minlen = args.maxlen = args.prod = 1;
	args.wasdel = wasdel;
	*logflagsp = 0;
	error = xfs_alloc_vextent(&args);
	if (error)
		goto out_root_realloc;

	if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
		error = -ENOSPC;
		goto out_root_realloc;
	}

	/*
	 * Allocation can't fail, the space was reserved.
	 */
	ASSERT(tp->t_firstblock == NULLFSBLOCK ||
	       args.agno >= XFS_FSB_TO_AGNO(mp, tp->t_firstblock));
	tp->t_firstblock = args.fsbno;
	cur->bc_private.b.allocated++;
	ip->i_d.di_nblocks++;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
	error = xfs_trans_get_buf(tp, mp->m_ddev_targp,
			XFS_FSB_TO_DADDR(mp, args.fsbno),
			mp->m_bsize, 0, &abp);
	if (error)
		goto out_unreserve_dquot;

	/*
	 * Fill in the child block.
	 */
	abp->b_ops = &xfs_bmbt_buf_ops;
	ablock = XFS_BUF_TO_BLOCK(abp);
	xfs_btree_init_block_int(mp, ablock, abp->b_bn,
				XFS_BTNUM_BMAP, 0, 0, ip->i_ino,
				XFS_BTREE_LONG_PTRS);

	for_each_xfs_iext(ifp, &icur, &rec) {
		if (isnullstartblock(rec.br_startblock))
			continue;
		arp = XFS_BMBT_REC_ADDR(mp, ablock, 1 + cnt);
		xfs_bmbt_disk_set_all(arp, &rec);
		cnt++;
	}
	ASSERT(cnt == XFS_IFORK_NEXTENTS(ip, whichfork));
	xfs_btree_set_numrecs(ablock, cnt);

	/*
	 * Fill in the root key and pointer.
	 */
	kp = XFS_BMBT_KEY_ADDR(mp, block, 1);
	arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
	kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
	pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
						be16_to_cpu(block->bb_level)));
	*pp = cpu_to_be64(args.fsbno);

	/*
	 * Do all this logging at the end so that
	 * the root is at the right level.
	 */
	xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS);
	xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
	ASSERT(*curp == NULL);
	*curp = cur;
	*logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
	return 0;

out_unreserve_dquot:
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
out_root_realloc:
	xfs_iroot_realloc(ip, -1, whichfork);
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
	ASSERT(ifp->if_broot == NULL);
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);

	return error;
}
/*
 * Convert a local file to an extents file.
 * This code is out of bounds for data forks of regular files,
 * since the file data needs to get logged so things will stay consistent.
 * (The bmap-level manipulations are ok, though).
 */
void
xfs_bmap_local_to_extents_empty(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);

	ASSERT(whichfork != XFS_COW_FORK);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
	ASSERT(ifp->if_bytes == 0);
	ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0);

	xfs_bmap_forkoff_reset(ip, whichfork);
	ifp->if_flags &= ~XFS_IFINLINE;
	ifp->if_flags |= XFS_IFEXTENTS;
	ifp->if_u1.if_root = NULL;
	ifp->if_height = 0;
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
}
STATIC int				/* error */
xfs_bmap_local_to_extents(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_extlen_t	total,		/* total blocks needed by transaction */
	int		*logflagsp,	/* inode logging flags */
	int		whichfork,
	void		(*init_fn)(struct xfs_trans *tp,
				   struct xfs_buf *bp,
				   struct xfs_inode *ip,
				   struct xfs_ifork *ifp))
{
	int		error = 0;
	int		flags;		/* logging flags returned */
	struct xfs_ifork *ifp;		/* inode fork pointer */
	xfs_alloc_arg_t	args;		/* allocation arguments */
	xfs_buf_t	*bp;		/* buffer for extent block */
	struct xfs_bmbt_irec rec;
	struct xfs_iext_cursor icur;

	/*
	 * We don't want to deal with the case of keeping inode data inline yet.
	 * So sending the data fork of a regular inode is invalid.
	 */
	ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK));
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);

	if (!ifp->if_bytes) {
		xfs_bmap_local_to_extents_empty(tp, ip, whichfork);
		flags = XFS_ILOG_CORE;
		goto done;
	}

	flags = 0;
	error = 0;
	ASSERT((ifp->if_flags & (XFS_IFINLINE|XFS_IFEXTENTS)) == XFS_IFINLINE);
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = ip->i_mount;
	xfs_rmap_ino_owner(&args.oinfo, ip->i_ino, whichfork, 0);
	/*
	 * Allocate a block.  We know we need only one, since the
	 * file currently fits in an inode.
	 */
	if (tp->t_firstblock == NULLFSBLOCK) {
		args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino);
		args.type = XFS_ALLOCTYPE_START_BNO;
	} else {
		args.fsbno = tp->t_firstblock;
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
	}
	args.total = total;
	args.minlen = args.maxlen = args.prod = 1;
	error = xfs_alloc_vextent(&args);
	if (error)
		goto done;

	/* Can't fail, the space was reserved. */
	ASSERT(args.fsbno != NULLFSBLOCK);
	ASSERT(args.len == 1);
	tp->t_firstblock = args.fsbno;
	error = xfs_trans_get_buf(tp, args.mp->m_ddev_targp,
			XFS_FSB_TO_DADDR(args.mp, args.fsbno),
			args.mp->m_bsize, 0, &bp);
	if (error)
		goto done;

	/*
	 * Initialize the block, copy the data and log the remote buffer.
	 *
	 * The callout is responsible for logging because the remote format
	 * might differ from the local format and thus we don't know how much to
	 * log here. Note that init_fn must also set the buffer log item type
	 * correctly.
	 */
	init_fn(tp, bp, ip, ifp);

	/* account for the change in fork size */
	xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
	xfs_bmap_local_to_extents_empty(tp, ip, whichfork);
	flags |= XFS_ILOG_CORE;

	ifp->if_u1.if_root = NULL;
	ifp->if_height = 0;

	rec.br_startoff = 0;
	rec.br_startblock = args.fsbno;
	rec.br_blockcount = 1;
	rec.br_state = XFS_EXT_NORM;
	xfs_iext_first(ifp, &icur);
	xfs_iext_insert(ip, &icur, &rec, 0);

	XFS_IFORK_NEXT_SET(ip, whichfork, 1);
	ip->i_d.di_nblocks = 1;
	xfs_trans_mod_dquot_byino(tp, ip,
		XFS_TRANS_DQ_BCOUNT, 1L);
	flags |= xfs_ilog_fext(whichfork);

done:
	*logflagsp = flags;
	return error;
}
/*
 * Called from xfs_bmap_add_attrfork to handle btree format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_btree(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	xfs_btree_cur_t		*cur;		/* btree cursor */
	int			error;		/* error return value */
	xfs_mount_t		*mp;		/* file system mount struct */
	int			stat;		/* newroot status */

	mp = ip->i_mount;
	if (ip->i_df.if_broot_bytes <= XFS_IFORK_DSIZE(ip))
		*flags |= XFS_ILOG_DBROOT;
	else {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
		error = xfs_bmbt_lookup_first(cur, &stat);
		if (error)
			goto error0;
		/* must be at least one entry */
		if (XFS_IS_CORRUPT(mp, stat != 1)) {
			error = -EFSCORRUPTED;
			goto error0;
		}
		if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
			goto error0;
		if (stat == 0) {
			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
			return -ENOSPC;
		}
		cur->bc_private.b.allocated = 0;
		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	}
	return 0;
error0:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}
/*
 * Called from xfs_bmap_add_attrfork to handle extents format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_extents(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	xfs_btree_cur_t		*cur;		/* bmap btree cursor */
	int			error;		/* error return value */

	if (ip->i_d.di_nextents * sizeof(xfs_bmbt_rec_t) <= XFS_IFORK_DSIZE(ip))
		return 0;
	cur = NULL;
	error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, flags,
					  XFS_DATA_FORK);
	if (cur) {
		cur->bc_private.b.allocated = 0;
		xfs_btree_del_cursor(cur, error);
	}
	return error;
}
/*
 * Called from xfs_bmap_add_attrfork to handle local format files. Each
 * different data fork content type needs a different callout to do the
 * conversion. Some are basic and only require special block initialisation
 * callouts for the data formatting, others (directories) are so specialised
 * they handle everything themselves.
 *
 * XXX (dgc): investigate whether directory conversion can use the generic
 * formatting callout. It should be possible - it's just a very complex
 * implementation.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_local(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	struct xfs_da_args	dargs;		/* args for dir/attr code */

	if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip))
		return 0;

	if (S_ISDIR(VFS_I(ip)->i_mode)) {
		memset(&dargs, 0, sizeof(dargs));
		dargs.geo = ip->i_mount->m_dir_geo;
		dargs.dp = ip;
		dargs.total = dargs.geo->fsbcount;
		dargs.whichfork = XFS_DATA_FORK;
		dargs.trans = tp;
		return xfs_dir2_sf_to_block(&dargs);
	}

	if (S_ISLNK(VFS_I(ip)->i_mode))
		return xfs_bmap_local_to_extents(tp, ip, 1, flags,
						 XFS_DATA_FORK,
						 xfs_symlink_local_to_remote);

	/* should only be called for types that support local format data */
	ASSERT(0);
	return -EFSCORRUPTED;
}
/* Set an inode attr fork offset based on the format */
int
xfs_bmap_set_attrforkoff(
	struct xfs_inode	*ip,
	int			size,
	int			*version)
{
	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_DEV:
		ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
		break;
	case XFS_DINODE_FMT_LOCAL:
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
		if (!ip->i_d.di_forkoff)
			ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
		else if ((ip->i_mount->m_flags & XFS_MOUNT_ATTR2) && version)
			*version = 2;
		break;
	default:
		ASSERT(0);
		return -EINVAL;
	}

	return 0;
}
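
/*
 * Example of the DEV case above (assuming the usual 4-byte xfs_dev_t):
 * roundup(4, 8) >> 3 == 1, i.e. the attr fork starts one 8-byte unit into
 * the literal area, just past the device number, since di_forkoff counts
 * in 8-byte words.
 */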
/*
 * Convert inode from non-attributed to attributed.
 * Must not be in a transaction, ip must not be locked.
 */
int						/* error code */
xfs_bmap_add_attrfork(
	xfs_inode_t		*ip,		/* incore inode pointer */
	int			size,		/* space new attribute needs */
	int			rsvd)		/* xact may use reserved blks */
{
	xfs_mount_t		*mp;		/* mount structure */
	xfs_trans_t		*tp;		/* transaction pointer */
	int			blks;		/* space reservation */
	int			version = 1;	/* superblock attr version */
	int			logflags;	/* logging flags */
	int			error;		/* error return value */

	ASSERT(XFS_IFORK_Q(ip) == 0);

	mp = ip->i_mount;
	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));

	blks = XFS_ADDAFORK_SPACE_RES(mp);

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_addafork, blks, 0,
			rsvd ? XFS_TRANS_RESERVE : 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_trans_reserve_quota_nblks(tp, ip, blks, 0, rsvd ?
			XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
			XFS_QMOPT_RES_REGBLKS);
	if (error)
		goto trans_cancel;
	if (XFS_IFORK_Q(ip))
		goto trans_cancel;
	if (XFS_IS_CORRUPT(mp, ip->i_d.di_anextents != 0)) {
		error = -EFSCORRUPTED;
		goto trans_cancel;
	}
	if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) {
		/*
		 * For inodes coming from pre-6.2 filesystems.
		 */
		ASSERT(ip->i_d.di_aformat == 0);
		ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
	}

	xfs_trans_ijoin(tp, ip, 0);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	error = xfs_bmap_set_attrforkoff(ip, size, &version);
	if (error)
		goto trans_cancel;
	ASSERT(ip->i_afp == NULL);
	ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, 0);
	ip->i_afp->if_flags = XFS_IFEXTENTS;
	logflags = 0;
	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_LOCAL:
		error = xfs_bmap_add_attrfork_local(tp, ip, &logflags);
		break;
	case XFS_DINODE_FMT_EXTENTS:
		error = xfs_bmap_add_attrfork_extents(tp, ip, &logflags);
		break;
	case XFS_DINODE_FMT_BTREE:
		error = xfs_bmap_add_attrfork_btree(tp, ip, &logflags);
		break;
	default:
		error = 0;
		break;
	}
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	if (error)
		goto trans_cancel;
	if (!xfs_sb_version_hasattr(&mp->m_sb) ||
	    (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) {
		bool log_sb = false;

		spin_lock(&mp->m_sb_lock);
		if (!xfs_sb_version_hasattr(&mp->m_sb)) {
			xfs_sb_version_addattr(&mp->m_sb);
			log_sb = true;
		}
		if (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2) {
			xfs_sb_version_addattr2(&mp->m_sb);
			log_sb = true;
		}
		spin_unlock(&mp->m_sb_lock);
		if (log_sb)
			xfs_log_sb(tp);
	}

	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

trans_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
/*
 * Internal and external extent tree search functions.
 */
struct xfs_iread_state {
	struct xfs_iext_cursor	icur;
	xfs_extnum_t		loaded;
};
/* Stuff every bmbt record from this block into the incore extent map. */
static int
xfs_iread_bmbt_block(
	struct xfs_btree_cur	*cur,
	int			level,
	void			*priv)
{
	struct xfs_iread_state	*ir = priv;
	struct xfs_mount	*mp = cur->bc_mp;
	struct xfs_inode	*ip = cur->bc_private.b.ip;
	struct xfs_btree_block	*block;
	struct xfs_buf		*bp;
	struct xfs_bmbt_rec	*frp;
	xfs_extnum_t		num_recs;
	xfs_extnum_t		j;
	int			whichfork = cur->bc_private.b.whichfork;

	block = xfs_btree_get_block(cur, level, &bp);

	/* Abort if we find more records than nextents. */
	num_recs = xfs_btree_get_numrecs(block);
	if (unlikely(ir->loaded + num_recs >
		     XFS_IFORK_NEXTENTS(ip, whichfork))) {
		xfs_warn(ip->i_mount, "corrupt dinode %llu, (btree extents).",
				(unsigned long long)ip->i_ino);
		xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, block,
				sizeof(*block), __this_address);
		return -EFSCORRUPTED;
	}

	/* Copy records into the incore cache. */
	frp = XFS_BMBT_REC_ADDR(mp, block, 1);
	for (j = 0; j < num_recs; j++, frp++, ir->loaded++) {
		struct xfs_bmbt_irec	new;
		xfs_failaddr_t		fa;

		xfs_bmbt_disk_get_all(frp, &new);
		fa = xfs_bmap_validate_extent(ip, whichfork, &new);
		if (fa) {
			xfs_inode_verifier_error(ip, -EFSCORRUPTED,
					"xfs_iread_extents(2)", frp,
					sizeof(*frp), fa);
			return -EFSCORRUPTED;
		}
		xfs_iext_insert(ip, &ir->icur, &new,
				xfs_bmap_fork_to_state(whichfork));
		trace_xfs_read_extent(ip, &ir->icur,
				xfs_bmap_fork_to_state(whichfork), _THIS_IP_);
		xfs_iext_next(XFS_IFORK_PTR(ip, whichfork), &ir->icur);
	}

	return 0;
}
/*
 * Read in extents from a btree-format inode.
 */
int
xfs_iread_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_iread_state	ir;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_btree_cur	*cur;
	int			error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	if (XFS_IS_CORRUPT(mp,
			   XFS_IFORK_FORMAT(ip, whichfork) !=
			   XFS_DINODE_FMT_BTREE)) {
		error = -EFSCORRUPTED;
		goto out;
	}

	ir.loaded = 0;
	xfs_iext_first(ifp, &ir.icur);
	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
	error = xfs_btree_visit_blocks(cur, xfs_iread_bmbt_block,
			XFS_BTREE_VISIT_RECORDS, &ir);
	xfs_btree_del_cursor(cur, error);
	if (error)
		goto out;

	if (XFS_IS_CORRUPT(mp,
			   ir.loaded != XFS_IFORK_NEXTENTS(ip, whichfork))) {
		error = -EFSCORRUPTED;
		goto out;
	}
	ASSERT(ir.loaded == xfs_iext_count(ifp));

	ifp->if_flags |= XFS_IFEXTENTS;
	return 0;
out:
	xfs_iext_destroy(ifp);
	return error;
}
/*
 * Returns the relative block number of the first unused block(s) in the given
 * fork with at least "len" logically contiguous blocks free. This is the
 * lowest-address hole if the fork has holes, else the first block past the end
 * of fork. Return 0 if the fork is currently local (in-inode).
 */
int						/* error */
xfs_bmap_first_unused(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_extlen_t		len,		/* size of hole to find */
	xfs_fileoff_t		*first_unused,	/* unused block */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;
	xfs_fileoff_t		lastaddr = 0;
	xfs_fileoff_t		lowest, max;
	int			error;

	ASSERT(xfs_ifork_has_extents(ip, whichfork) ||
	       XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);

	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
		*first_unused = 0;
		return 0;
	}

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	lowest = max = *first_unused;
	for_each_xfs_iext(ifp, &icur, &got) {
		/*
		 * See if the hole before this extent will work.
		 */
		if (got.br_startoff >= lowest + len &&
		    got.br_startoff - max >= len)
			break;
		lastaddr = got.br_startoff + got.br_blockcount;
		max = XFS_FILEOFF_MAX(lastaddr, lowest);
	}

	*first_unused = max;
	return 0;
}
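
/*
 * Worked example: starting the search at offset 0 with extents covering
 * [0, 10) and [20, 30) and len = 5, the loop rejects the zero-length
 * "hole" before the first extent, sets max = 10 after it, then sees that
 * 20 - 10 >= 5 and breaks, reporting the hole at offset 10.  With
 * len = 15 no hole qualifies, the loop runs off the end, and the search
 * returns max = 30, the first block past EOF.
 */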
/*
 * Returns the file-relative block number of the last block - 1 before
 * last_block (input value) in the file.
 * This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int						/* error */
xfs_bmap_last_before(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_fileoff_t		*last_block,	/* last block */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;
	int			error;

	switch (XFS_IFORK_FORMAT(ip, whichfork)) {
	case XFS_DINODE_FMT_LOCAL:
		*last_block = 0;
		return 0;
	case XFS_DINODE_FMT_BTREE:
	case XFS_DINODE_FMT_EXTENTS:
		break;
	default:
		ASSERT(0);
		return -EFSCORRUPTED;
	}

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	if (!xfs_iext_lookup_extent_before(ip, ifp, last_block, &icur, &got))
		*last_block = 0;
	return 0;
}
STATIC int
xfs_bmap_last_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*rec,
	int			*is_empty)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_iext_cursor	icur;
	int			error;

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	xfs_iext_last(ifp, &icur);
	if (!xfs_iext_get_extent(ifp, &icur, rec))
		*is_empty = 1;
	else
		*is_empty = 0;
	return 0;
}
/*
 * Check the last inode extent to determine whether this allocation will result
 * in blocks being allocated at the end of the file. When we allocate new data
 * blocks at the end of the file which do not start at the previous data block,
 * we will try to align the new blocks at stripe unit boundaries.
 *
 * Returns 1 in bma->aeof if the file (fork) is empty as any new write will be
 * at, or past the EOF.
 */
STATIC int
xfs_bmap_isaeof(
	struct xfs_bmalloca	*bma,
	int			whichfork)
{
	struct xfs_bmbt_irec	rec;
	int			is_empty;
	int			error;

	bma->aeof = false;
	error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
				     &is_empty);
	if (error)
		return error;

	if (is_empty) {
		bma->aeof = true;
		return 0;
	}

	/*
	 * Check if we are allocating at or past the last extent, or at least
	 * into the last delayed allocated extent.
	 */
	bma->aeof = bma->offset >= rec.br_startoff + rec.br_blockcount ||
		(bma->offset >= rec.br_startoff &&
		 isnullstartblock(rec.br_startblock));
	return 0;
}
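
/*
 * Example: if the last extent is [100, 110) and bma->offset is 110 or
 * beyond, the allocation is at EOF and may be stripe-aligned; an offset
 * of 105 only counts as "at EOF" when the last extent is still delalloc
 * (isnullstartblock()), i.e. nothing has been physically placed there
 * yet.
 */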
/*
 * Returns the file-relative block number of the first block past eof in
 * the file.  This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int
xfs_bmap_last_offset(
	struct xfs_inode	*ip,
	xfs_fileoff_t		*last_block,
	int			whichfork)
{
	struct xfs_bmbt_irec	rec;
	int			is_empty;
	int			error;

	*last_block = 0;

	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL)
		return 0;

	if (XFS_IS_CORRUPT(ip->i_mount, !xfs_ifork_has_extents(ip, whichfork)))
		return -EFSCORRUPTED;

	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty);
	if (error || is_empty)
		return error;

	*last_block = rec.br_startoff + rec.br_blockcount;
	return 0;
}
/*
 * Returns whether the selected fork of the inode has exactly one
 * block or not.  For the data fork we check this matches di_size,
 * implying the file's range is 0..bsize-1.
 */
int					/* 1=>1 block, 0=>otherwise */
xfs_bmap_one_block(
	xfs_inode_t	*ip,		/* incore inode */
	int		whichfork)	/* data or attr fork */
{
	struct xfs_ifork *ifp;		/* inode fork pointer */
	int		rval;		/* return value */
	xfs_bmbt_irec_t	s;		/* internal version of extent */
	struct xfs_iext_cursor icur;

#ifndef DEBUG
	if (whichfork == XFS_DATA_FORK)
		return XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize;
#endif	/* !DEBUG */
	if (XFS_IFORK_NEXTENTS(ip, whichfork) != 1)
		return 0;
	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
		return 0;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
	xfs_iext_first(ifp, &icur);
	xfs_iext_get_extent(ifp, &icur, &s);
	rval = s.br_startoff == 0 && s.br_blockcount == 1;
	if (rval && whichfork == XFS_DATA_FORK)
		ASSERT(XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize);
	return rval;
}
/*
 * Extent tree manipulation functions used during allocation.
 */

/*
 * Convert a delayed allocation to a real allocation.
 */
STATIC int				/* error */
xfs_bmap_add_extent_delay_real(
	struct xfs_bmalloca	*bma,
	int			whichfork)
{
	struct xfs_bmbt_irec	*new = &bma->got;
	int			error;	/* error return value */
	int			i;	/* temp state */
	struct xfs_ifork	*ifp;	/* inode fork pointer */
	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
					/* left is 0, right is 1, prev is 2 */
	int			rval = 0;	/* return value (logging flags) */
	int			state = xfs_bmap_fork_to_state(whichfork);
	xfs_filblks_t		da_new; /* new count del alloc blocks used */
	xfs_filblks_t		da_old; /* old count del alloc blocks used */
	xfs_filblks_t		temp = 0;	/* value for da_new calculations */
	int			tmp_rval;	/* partial logging flags */
	struct xfs_mount	*mp;
	xfs_extnum_t		*nextents;
	struct xfs_bmbt_irec	old;

	mp = bma->ip->i_mount;
	ifp = XFS_IFORK_PTR(bma->ip, whichfork);
	ASSERT(whichfork != XFS_ATTR_FORK);
	nextents = (whichfork == XFS_COW_FORK ? &bma->ip->i_cnextents :
						&bma->ip->i_d.di_nextents);

	ASSERT(!isnullstartblock(new->br_startblock));
	ASSERT(!bma->cur ||
	       (bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));

	XFS_STATS_INC(mp, xs_add_exlist);

#define	LEFT		r[0]
#define	RIGHT		r[1]
#define	PREV		r[2]

	/*
	 * Set up a bunch of variables to make the tests simpler.
	 */
	xfs_iext_get_extent(ifp, &bma->icur, &PREV);
	new_endoff = new->br_startoff + new->br_blockcount;
	ASSERT(isnullstartblock(PREV.br_startblock));
	ASSERT(PREV.br_startoff <= new->br_startoff);
	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);

	da_old = startblockval(PREV.br_startblock);
	da_new = 0;

	/*
	 * Set flags determining what part of the previous delayed allocation
	 * extent is being replaced by a real allocation.
	 */
	if (PREV.br_startoff == new->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
		state |= BMAP_RIGHT_FILLING;

	/*
	 * Check and set flags if this segment has a left neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 */
	if (xfs_iext_peek_prev_extent(ifp, &bma->icur, &LEFT)) {
		state |= BMAP_LEFT_VALID;
		if (isnullstartblock(LEFT.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
	    LEFT.br_state == new->br_state &&
	    LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
		state |= BMAP_LEFT_CONTIG;

	/*
	 * Check and set flags if this segment has a right neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 * Also check for all-three-contiguous being too large.
	 */
	if (xfs_iext_peek_next_extent(ifp, &bma->icur, &RIGHT)) {
		state |= BMAP_RIGHT_VALID;
		if (isnullstartblock(RIGHT.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
	    new_endoff == RIGHT.br_startoff &&
	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
	    new->br_state == RIGHT.br_state &&
	    new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING)) !=
		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING) ||
	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
			<= MAXEXTLEN))
		state |= BMAP_RIGHT_CONTIG;
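
	/*
	 * Example of the encoding (hypothetical values): a real allocation
	 * that starts exactly at PREV.br_startoff and abuts its left
	 * neighbor carries BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG, so the
	 * switch below merges the new blocks into LEFT and shrinks the
	 * remaining delalloc PREV.  All sixteen FILLING/CONTIG combinations
	 * are dispatched this way, with the impossible ones asserted out at
	 * the bottom of the switch.
	 */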

	error = 0;
	/*
	 * Switch out based on the FILLING and CONTIG state bits.
	 */
	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The left and right neighbors are both contiguous with new.
		 */
		LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;

		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
		(*nextents)--;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_delete(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_decrement(bma->cur, 0, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The left neighbor is contiguous, the right is not.
		 */
		old = LEFT;
		LEFT.br_blockcount += PREV.br_blockcount;

		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The right neighbor is contiguous, the left is not. Take care
		 * with delay -> unwritten extent allocation here because the
		 * delalloc record we are overwriting is always written.
		 */
		PREV.br_startblock = new->br_startblock;
		PREV.br_blockcount += RIGHT.br_blockcount;
		PREV.br_state = new->br_state;

		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &PREV);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * Neither the left nor right neighbors are contiguous with
		 * the new one.
		 */
		PREV.br_startblock = new->br_startblock;
		PREV.br_state = new->br_state;
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
		(*nextents)++;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
		}
		break;
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Filling in the first part of a previous delayed allocation.
		 * The left neighbor is contiguous.
		 */
		old = LEFT;
		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
				startblockval(PREV.br_startblock));

		LEFT.br_blockcount += new->br_blockcount;

		PREV.br_blockcount = temp;
		PREV.br_startoff += new->br_blockcount;
		PREV.br_startblock = nullstartblock(da_new);

		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING:
		/*
		 * Filling in the first part of a previous delayed allocation.
		 * The left neighbor is not contiguous.
		 */
		xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
		(*nextents)++;
		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
					&bma->cur, 1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock) -
			(bma->cur ? bma->cur->bc_private.b.allocated : 0));

		PREV.br_startoff = new_endoff;
		PREV.br_blockcount = temp;
		PREV.br_startblock = nullstartblock(da_new);
		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
		xfs_iext_prev(ifp, &bma->icur);
		break;

	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in the last part of a previous delayed allocation.
		 * The right neighbor is contiguous with the new allocation.
		 */
		old = RIGHT;
		RIGHT.br_startoff = new->br_startoff;
		RIGHT.br_startblock = new->br_startblock;
		RIGHT.br_blockcount += new->br_blockcount;

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &RIGHT);
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
				startblockval(PREV.br_startblock));

		PREV.br_blockcount = temp;
		PREV.br_startblock = nullstartblock(da_new);

		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &RIGHT);
		break;

	case BMAP_RIGHT_FILLING:
		/*
		 * Filling in the last part of a previous delayed allocation.
		 * The right neighbor is not contiguous.
		 */
		xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
		(*nextents)++;
		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
				&bma->cur, 1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock) -
			(bma->cur ? bma->cur->bc_private.b.allocated : 0));

		PREV.br_startblock = nullstartblock(da_new);
		PREV.br_blockcount = temp;
		xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
		xfs_iext_next(ifp, &bma->icur);
		break;
	case 0:
		/*
		 * Filling in the middle part of a previous delayed allocation.
		 * Contiguity is impossible here.
		 * This case is avoided almost all the time.
		 *
		 * We start with a delayed allocation:
		 *
		 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+
		 *  PREV @ idx
		 *
		 * and we are allocating:
		 *                     +rrrrrrrrrrrrrrrrr+
		 *                            new
		 *
		 * and we set it up for insertion as:
		 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+
		 *                            new
		 *  PREV @ idx          LEFT              RIGHT
		 *                      inserted at idx + 1
		 */
		old = PREV;

		/* LEFT is the new middle */
		LEFT = *new;

		/* RIGHT is the new right */
		RIGHT.br_state = PREV.br_state;
		RIGHT.br_startoff = new_endoff;
		RIGHT.br_blockcount =
			PREV.br_startoff + PREV.br_blockcount - new_endoff;
		RIGHT.br_startblock =
			nullstartblock(xfs_bmap_worst_indlen(bma->ip,
					RIGHT.br_blockcount));

		/* truncate PREV */
		PREV.br_blockcount = new->br_startoff - PREV.br_startoff;
		PREV.br_startblock =
			nullstartblock(xfs_bmap_worst_indlen(bma->ip,
					PREV.br_blockcount));
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);

		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_insert(bma->ip, &bma->icur, &RIGHT, state);
		xfs_iext_insert(bma->ip, &bma->icur, &LEFT, state);
		(*nextents)++;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
					&bma->cur, 1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		da_new = startblockval(PREV.br_startblock) +
			 startblockval(RIGHT.br_startblock);
		break;

	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_CONTIG:
	case BMAP_RIGHT_CONTIG:
		/*
		 * These cases are all impossible.
		 */
		ASSERT(0);
	}

	/* add reverse mapping unless caller opted out */
	if (!(bma->flags & XFS_BMAPI_NORMAP))
		xfs_rmap_map_extent(bma->tp, bma->ip, whichfork, new);

	/* convert to a btree if necessary */
	if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
		int	tmp_logflags;	/* partial log flag return val */

		ASSERT(bma->cur == NULL);
		error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
				&bma->cur, da_old > 0, &tmp_logflags,
				whichfork);
		bma->logflags |= tmp_logflags;
		if (error)
			goto done;
	}

	if (da_new != da_old)
		xfs_mod_delalloc(mp, (int64_t)da_new - da_old);

	if (bma->cur) {
		da_new += bma->cur->bc_private.b.allocated;
		bma->cur->bc_private.b.allocated = 0;
	}

	/* adjust for changes in reserved delayed indirect blocks */
	if (da_new != da_old) {
		ASSERT(state == 0 || da_new < da_old);
		error = xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new),
				false);
	}

	xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
done:
	if (whichfork != XFS_COW_FORK)
		bma->logflags |= rval;
	return error;
#undef	LEFT
#undef	RIGHT
#undef	PREV
}
/*
 * Convert an unwritten allocation to a real allocation or vice versa.
 */
STATIC int				/* error */
xfs_bmap_add_extent_unwritten_real(
	struct xfs_trans	*tp,
	xfs_inode_t		*ip,	/* incore inode pointer */
	int			whichfork,
	struct xfs_iext_cursor	*icur,
	xfs_btree_cur_t		**curp,	/* if *curp is null, not a btree */
	xfs_bmbt_irec_t		*new,	/* new data to add to file extents */
	int			*logflagsp) /* inode logging flags */
{
	xfs_btree_cur_t		*cur;	/* btree cursor */
	int			error;	/* error return value */
	int			i;	/* temp state */
	struct xfs_ifork	*ifp;	/* inode fork pointer */
	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
					/* left is 0, right is 1, prev is 2 */
	int			rval = 0;	/* return value (logging flags) */
	int			state = xfs_bmap_fork_to_state(whichfork);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	old;

	*logflagsp = 0;

	cur = *curp;
	ifp = XFS_IFORK_PTR(ip, whichfork);

	ASSERT(!isnullstartblock(new->br_startblock));

	XFS_STATS_INC(mp, xs_add_exlist);

#define	LEFT		r[0]
#define	RIGHT		r[1]
#define	PREV		r[2]

	/*
	 * Set up a bunch of variables to make the tests simpler.
	 */
	error = 0;
	xfs_iext_get_extent(ifp, icur, &PREV);
	ASSERT(new->br_state != PREV.br_state);
	new_endoff = new->br_startoff + new->br_blockcount;
	ASSERT(PREV.br_startoff <= new->br_startoff);
	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);

	/*
	 * Set flags determining what part of the previous oldext allocation
	 * extent is being replaced by a newext allocation.
	 */
	if (PREV.br_startoff == new->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
		state |= BMAP_RIGHT_FILLING;

	/*
	 * Check and set flags if this segment has a left neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 */
	if (xfs_iext_peek_prev_extent(ifp, icur, &LEFT)) {
		state |= BMAP_LEFT_VALID;
		if (isnullstartblock(LEFT.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
	    LEFT.br_state == new->br_state &&
	    LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
		state |= BMAP_LEFT_CONTIG;

	/*
	 * Check and set flags if this segment has a right neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 * Also check for all-three-contiguous being too large.
	 */
	if (xfs_iext_peek_next_extent(ifp, icur, &RIGHT)) {
		state |= BMAP_RIGHT_VALID;
		if (isnullstartblock(RIGHT.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
	    new_endoff == RIGHT.br_startoff &&
	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
	    new->br_state == RIGHT.br_state &&
	    new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING)) !=
		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING) ||
	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
			<= MAXEXTLEN))
		state |= BMAP_RIGHT_CONTIG;

	/*
	 * Switch out based on the FILLING and CONTIG state bits.
	 */
	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * The left and right neighbors are both contiguous with new.
		 */
		LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;

		xfs_iext_remove(ip, icur, state);
		xfs_iext_remove(ip, icur, state);
		xfs_iext_prev(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &LEFT);
		XFS_IFORK_NEXT_SET(ip, whichfork,
				XFS_IFORK_NEXTENTS(ip, whichfork) - 2);
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * The left neighbor is contiguous, the right is not.
		 */
		LEFT.br_blockcount += PREV.br_blockcount;

		xfs_iext_remove(ip, icur, state);
		xfs_iext_prev(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &LEFT);
		XFS_IFORK_NEXT_SET(ip, whichfork,
				XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(cur, &PREV, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * The right neighbor is contiguous, the left is not.
		 */
		PREV.br_blockcount += RIGHT.br_blockcount;
		PREV.br_state = new->br_state;

		xfs_iext_next(ifp, icur);
		xfs_iext_remove(ip, icur, state);
		xfs_iext_prev(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &PREV);

		XFS_IFORK_NEXT_SET(ip, whichfork,
				XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &PREV);
			if (error)
				goto done;
		}
		break;
	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * Neither the left nor right neighbors are contiguous with
		 * the new one.
		 */
		PREV.br_state = new->br_state;
		xfs_iext_update_extent(ip, state, icur, &PREV);

		if (cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &PREV);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Setting the first part of a previous oldext extent to newext.
		 * The left neighbor is contiguous.
		 */
		LEFT.br_blockcount += new->br_blockcount;

		old = PREV;
		PREV.br_startoff += new->br_blockcount;
		PREV.br_startblock += new->br_blockcount;
		PREV.br_blockcount -= new->br_blockcount;

		xfs_iext_update_extent(ip, state, icur, &PREV);
		xfs_iext_prev(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &LEFT);

		if (cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &PREV);
			if (error)
				goto done;
			error = xfs_btree_decrement(cur, 0, &i);
			if (error)
				goto done;
			error = xfs_bmbt_update(cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING:
		/*
		 * Setting the first part of a previous oldext extent to newext.
		 * The left neighbor is not contiguous.
		 */
		old = PREV;
		PREV.br_startoff += new->br_blockcount;
		PREV.br_startblock += new->br_blockcount;
		PREV.br_blockcount -= new->br_blockcount;

		xfs_iext_update_extent(ip, state, icur, &PREV);
		xfs_iext_insert(ip, icur, new, state);
		XFS_IFORK_NEXT_SET(ip, whichfork,
				XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &PREV);
			if (error)
				goto done;
			cur->bc_rec.b = *new;
			if ((error = xfs_btree_insert(cur, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
		}
		break;

	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Setting the last part of a previous oldext extent to newext.
		 * The right neighbor is contiguous with the new allocation.
		 */
		old = PREV;
		PREV.br_blockcount -= new->br_blockcount;

		RIGHT.br_startoff = new->br_startoff;
		RIGHT.br_startblock = new->br_startblock;
		RIGHT.br_blockcount += new->br_blockcount;

		xfs_iext_update_extent(ip, state, icur, &PREV);
		xfs_iext_next(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &RIGHT);

		if (cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &PREV);
			if (error)
				goto done;
			error = xfs_btree_increment(cur, 0, &i);
			if (error)
				goto done;
			error = xfs_bmbt_update(cur, &RIGHT);
			if (error)
				goto done;
		}
		break;

	case BMAP_RIGHT_FILLING:
		/*
		 * Setting the last part of a previous oldext extent to newext.
		 * The right neighbor is not contiguous.
		 */
		old = PREV;
		PREV.br_blockcount -= new->br_blockcount;

		xfs_iext_update_extent(ip, state, icur, &PREV);
		xfs_iext_next(ifp, icur);
		xfs_iext_insert(ip, icur, new, state);

		XFS_IFORK_NEXT_SET(ip, whichfork,
				XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &PREV);
			if (error)
				goto done;
			error = xfs_bmbt_lookup_eq(cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			if ((error = xfs_btree_insert(cur, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
		}
		break;

	case 0:
		/*
		 * Setting the middle part of a previous oldext extent to
		 * newext.  Contiguity is impossible here.
		 * One extent becomes three extents.
		 */
		old = PREV;
		PREV.br_blockcount = new->br_startoff - PREV.br_startoff;

		r[0] = *new;
		r[1].br_startoff = new_endoff;
		r[1].br_blockcount =
			old.br_startoff + old.br_blockcount - new_endoff;
		r[1].br_startblock = new->br_startblock + new->br_blockcount;
		r[1].br_state = PREV.br_state;

		xfs_iext_update_extent(ip, state, icur, &PREV);
		xfs_iext_next(ifp, icur);
		xfs_iext_insert(ip, icur, &r[1], state);
		xfs_iext_insert(ip, icur, &r[0], state);

		XFS_IFORK_NEXT_SET(ip, whichfork,
				XFS_IFORK_NEXTENTS(ip, whichfork) + 2);
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			/* new right extent - oldext */
			error = xfs_bmbt_update(cur, &r[1]);
			if (error)
				goto done;
			/* new left extent - oldext */
			cur->bc_rec.b = PREV;
			if ((error = xfs_btree_insert(cur, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			/*
			 * Reset the cursor to the position of the new extent
			 * we are about to insert as we can't trust it after
			 * the previous insert.
			 */
			error = xfs_bmbt_lookup_eq(cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			/* new middle extent - newext */
			if ((error = xfs_btree_insert(cur, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_CONTIG:
	case BMAP_RIGHT_CONTIG:
		/*
		 * These cases are all impossible.
		 */
		ASSERT(0);
	}

	/* update reverse mappings */
	xfs_rmap_convert_extent(mp, tp, ip, whichfork, new);

	/* convert to a btree if necessary */
	if (xfs_bmap_needs_btree(ip, whichfork)) {
		int	tmp_logflags;	/* partial log flag return val */

		ASSERT(cur == NULL);
		error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
				&tmp_logflags, whichfork);
		*logflagsp |= tmp_logflags;
		if (error)
			goto done;
	}

	/* clear out the allocated field, done with it now in any case. */
	if (cur) {
		cur->bc_private.b.allocated = 0;
		*curp = cur;
	}

	xfs_bmap_check_leaf_extents(*curp, ip, whichfork);
done:
	*logflagsp |= rval;
	return error;
#undef	LEFT
#undef	RIGHT
#undef	PREV
}

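/*
 * For illustration: converting all of PREV while both neighbors are
 * contiguous collapses three records into one (two xfs_iext_remove() calls,
 * nextents -= 2), while converting only the middle of PREV splits one
 * record into three (two xfs_iext_insert() calls, nextents += 2).  Every
 * other legal case updates or merges a single record, which is why the
 * extent count never changes by more than two in one call.
 */
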
/*
 * Convert a hole to a delayed allocation.
 */
STATIC void
xfs_bmap_add_extent_hole_delay(
	xfs_inode_t		*ip,	/* incore inode pointer */
	int			whichfork,
	struct xfs_iext_cursor	*icur,
	xfs_bmbt_irec_t		*new)	/* new data to add to file extents */
{
	struct xfs_ifork	*ifp;	/* inode fork pointer */
	xfs_bmbt_irec_t		left;	/* left neighbor extent entry */
	xfs_filblks_t		newlen = 0;	/* new indirect size */
	xfs_filblks_t		oldlen = 0;	/* old indirect size */
	xfs_bmbt_irec_t		right;	/* right neighbor extent entry */
	int			state = xfs_bmap_fork_to_state(whichfork);
	xfs_filblks_t		temp;	/* temp for indirect calculations */

	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(isnullstartblock(new->br_startblock));

	/*
	 * Check and set flags if this segment has a left neighbor
	 */
	if (xfs_iext_peek_prev_extent(ifp, icur, &left)) {
		state |= BMAP_LEFT_VALID;
		if (isnullstartblock(left.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	/*
	 * Check and set flags if the current (right) segment exists.
	 * If it doesn't exist, we're converting the hole at end-of-file.
	 */
	if (xfs_iext_get_extent(ifp, icur, &right)) {
		state |= BMAP_RIGHT_VALID;
		if (isnullstartblock(right.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	/*
	 * Set contiguity flags on the left and right neighbors.
	 * Don't let extents get too large, even if the pieces are contiguous.
	 */
	if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) &&
	    left.br_startoff + left.br_blockcount == new->br_startoff &&
	    left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
		state |= BMAP_LEFT_CONTIG;

	if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) &&
	    new->br_startoff + new->br_blockcount == right.br_startoff &&
	    new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
	    (!(state & BMAP_LEFT_CONTIG) ||
	     (left.br_blockcount + new->br_blockcount +
	      right.br_blockcount <= MAXEXTLEN)))
		state |= BMAP_RIGHT_CONTIG;

	/*
	 * Switch out based on the contiguity flags.
	 */
	switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
		/*
		 * New allocation is contiguous with delayed allocations
		 * on the left and on the right.
		 * Merge all three into a single extent record.
		 */
		temp = left.br_blockcount + new->br_blockcount +
			right.br_blockcount;

		oldlen = startblockval(left.br_startblock) +
			startblockval(new->br_startblock) +
			startblockval(right.br_startblock);
		newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
					 oldlen);
		left.br_startblock = nullstartblock(newlen);
		left.br_blockcount = temp;

		xfs_iext_remove(ip, icur, state);
		xfs_iext_prev(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &left);
		break;

	case BMAP_LEFT_CONTIG:
		/*
		 * New allocation is contiguous with a delayed allocation
		 * on the left.
		 * Merge the new allocation with the left neighbor.
		 */
		temp = left.br_blockcount + new->br_blockcount;

		oldlen = startblockval(left.br_startblock) +
			startblockval(new->br_startblock);
		newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
					 oldlen);
		left.br_blockcount = temp;
		left.br_startblock = nullstartblock(newlen);

		xfs_iext_prev(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &left);
		break;

	case BMAP_RIGHT_CONTIG:
		/*
		 * New allocation is contiguous with a delayed allocation
		 * on the right.
		 * Merge the new allocation with the right neighbor.
		 */
		temp = new->br_blockcount + right.br_blockcount;
		oldlen = startblockval(new->br_startblock) +
			startblockval(right.br_startblock);
		newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
					 oldlen);
		right.br_startoff = new->br_startoff;
		right.br_startblock = nullstartblock(newlen);
		right.br_blockcount = temp;
		xfs_iext_update_extent(ip, state, icur, &right);
		break;

	case 0:
		/*
		 * New allocation is not contiguous with another
		 * delayed allocation.
		 * Insert a new entry.
		 */
		oldlen = newlen = 0;
		xfs_iext_insert(ip, icur, new, state);
		break;
	}
	if (oldlen != newlen) {
		ASSERT(oldlen > newlen);
		xfs_mod_fdblocks(ip->i_mount, (int64_t)(oldlen - newlen),
				 false);
		/*
		 * Nothing to do for disk quota accounting here.
		 */
		xfs_mod_delalloc(ip->i_mount, (int64_t)newlen - oldlen);
	}
}

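/*
 * Worked example (hypothetical numbers): merging a 100-block delalloc
 * extent carrying a worst-case indlen of 4 with a new 100-block
 * reservation carrying an indlen of 4 might only need a worst-case
 * indlen of 5 for the combined 200 blocks.  Then oldlen = 8 and
 * newlen = 5, so the xfs_mod_fdblocks() call above returns the 3
 * surplus indirect blocks to the free pool.
 */
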
/*
 * Convert a hole to a real allocation.
 */
STATIC int				/* error */
xfs_bmap_add_extent_hole_real(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_iext_cursor	*icur,
	struct xfs_btree_cur	**curp,
	struct xfs_bmbt_irec	*new,
	int			*logflagsp,
	int			flags)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_btree_cur	*cur = *curp;
	int			error;	/* error return value */
	int			i;	/* temp state */
	xfs_bmbt_irec_t		left;	/* left neighbor extent entry */
	xfs_bmbt_irec_t		right;	/* right neighbor extent entry */
	int			rval = 0; /* return value (logging flags) */
	int			state = xfs_bmap_fork_to_state(whichfork);
	struct xfs_bmbt_irec	old;

	ASSERT(!isnullstartblock(new->br_startblock));
	ASSERT(!cur || !(cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));

	XFS_STATS_INC(mp, xs_add_exlist);

	/*
	 * Check and set flags if this segment has a left neighbor.
	 */
	if (xfs_iext_peek_prev_extent(ifp, icur, &left)) {
		state |= BMAP_LEFT_VALID;
		if (isnullstartblock(left.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	/*
	 * Check and set flags if this segment has a current value.
	 * Not true if we're inserting into the "hole" at eof.
	 */
	if (xfs_iext_get_extent(ifp, icur, &right)) {
		state |= BMAP_RIGHT_VALID;
		if (isnullstartblock(right.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	/*
	 * We're inserting a real allocation between "left" and "right".
	 * Set the contiguity flags.  Don't let extents get too large.
	 */
	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
	    left.br_startoff + left.br_blockcount == new->br_startoff &&
	    left.br_startblock + left.br_blockcount == new->br_startblock &&
	    left.br_state == new->br_state &&
	    left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
		state |= BMAP_LEFT_CONTIG;

	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
	    new->br_startoff + new->br_blockcount == right.br_startoff &&
	    new->br_startblock + new->br_blockcount == right.br_startblock &&
	    new->br_state == right.br_state &&
	    new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
	    (!(state & BMAP_LEFT_CONTIG) ||
	     left.br_blockcount + new->br_blockcount +
	     right.br_blockcount <= MAXEXTLEN))
		state |= BMAP_RIGHT_CONTIG;

	error = 0;
	/*
	 * Select which case we're in here, and implement it.
	 */
	switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
		/*
		 * New allocation is contiguous with real allocations on the
		 * left and on the right.
		 * Merge all three into a single extent record.
		 */
		left.br_blockcount += new->br_blockcount + right.br_blockcount;

		xfs_iext_remove(ip, icur, state);
		xfs_iext_prev(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &left);

		XFS_IFORK_NEXT_SET(ip, whichfork,
				XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
		if (cur == NULL) {
			rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
		} else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(cur, &right, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_delete(cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_decrement(cur, 0, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &left);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_CONTIG:
		/*
		 * New allocation is contiguous with a real allocation
		 * on the left.
		 * Merge the new allocation with the left neighbor.
		 */
		old = left;
		left.br_blockcount += new->br_blockcount;

		xfs_iext_prev(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &left);

		if (cur == NULL) {
			rval = xfs_ilog_fext(whichfork);
		} else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &left);
			if (error)
				goto done;
		}
		break;

	case BMAP_RIGHT_CONTIG:
		/*
		 * New allocation is contiguous with a real allocation
		 * on the right.
		 * Merge the new allocation with the right neighbor.
		 */
		old = right;

		right.br_startoff = new->br_startoff;
		right.br_startblock = new->br_startblock;
		right.br_blockcount += new->br_blockcount;
		xfs_iext_update_extent(ip, state, icur, &right);

		if (cur == NULL) {
			rval = xfs_ilog_fext(whichfork);
		} else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &right);
			if (error)
				goto done;
		}
		break;

	case 0:
		/*
		 * New allocation is not contiguous with another
		 * real allocation.
		 * Insert a new entry.
		 */
		xfs_iext_insert(ip, icur, new, state);
		XFS_IFORK_NEXT_SET(ip, whichfork,
				XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
		if (cur == NULL) {
			rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
		} else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_insert(cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
		}
		break;
	}

	/* add reverse mapping unless caller opted out */
	if (!(flags & XFS_BMAPI_NORMAP))
		xfs_rmap_map_extent(tp, ip, whichfork, new);

	/* convert to a btree if necessary */
	if (xfs_bmap_needs_btree(ip, whichfork)) {
		int	tmp_logflags;	/* partial log flag return val */

		ASSERT(cur == NULL);
		error = xfs_bmap_extents_to_btree(tp, ip, curp, 0,
				&tmp_logflags, whichfork);
		*logflagsp |= tmp_logflags;
		cur = *curp;
		if (error)
			goto done;
	}

	/* clear out the allocated field, done with it now in any case. */
	if (cur)
		cur->bc_private.b.allocated = 0;

	xfs_bmap_check_leaf_extents(cur, ip, whichfork);
done:
	*logflagsp |= rval;
	return error;
}

/*
 * Functions used in the extent read, allocate and remove paths
 */

/*
 * Adjust the size of the new extent based on di_extsize and rt extsize.
 */
int
xfs_bmap_extsize_align(
	xfs_mount_t	*mp,		/* file system mount structure */
	xfs_bmbt_irec_t	*gotp,		/* next extent pointer */
	xfs_bmbt_irec_t	*prevp,		/* previous extent pointer */
	xfs_extlen_t	extsz,		/* align to this extent size */
	int		rt,		/* is this a realtime inode? */
	int		eof,		/* is extent at end-of-file? */
	int		delay,		/* creating delalloc extent? */
	int		convert,	/* overwriting unwritten extent? */
	xfs_fileoff_t	*offp,		/* in/out: aligned offset */
	xfs_extlen_t	*lenp)		/* in/out: aligned length */
{
	xfs_fileoff_t	orig_off;	/* original offset */
	xfs_extlen_t	orig_alen;	/* original length */
	xfs_fileoff_t	orig_end;	/* original off+len */
	xfs_fileoff_t	nexto;		/* next file offset */
	xfs_fileoff_t	prevo;		/* previous file offset */
	xfs_fileoff_t	align_off;	/* temp for offset */
	xfs_extlen_t	align_alen;	/* temp for length */
	xfs_extlen_t	temp;		/* temp for calculations */

	if (convert)
		return 0;

	orig_off = align_off = *offp;
	orig_alen = align_alen = *lenp;
	orig_end = orig_off + orig_alen;

	/*
	 * If this request overlaps an existing extent, then don't
	 * attempt to perform any additional alignment.
	 */
	if (!delay && !eof &&
	    (orig_off >= gotp->br_startoff) &&
	    (orig_end <= gotp->br_startoff + gotp->br_blockcount)) {
		return 0;
	}

	/*
	 * If the file offset is unaligned vs. the extent size
	 * we need to align it.  This will be possible unless
	 * the file was previously written with a kernel that didn't
	 * perform this alignment, or if a truncate shot us in the
	 * foot.
	 */
	div_u64_rem(orig_off, extsz, &temp);
	if (temp) {
		align_alen += temp;
		align_off -= temp;
	}

	/* Same adjustment for the end of the requested area. */
	temp = (align_alen % extsz);
	if (temp)
		align_alen += extsz - temp;

	/*
	 * For large extent hint sizes, the aligned extent might be larger than
	 * MAXEXTLEN. In that case, reduce the size by an extsz so that it pulls
	 * the length back under MAXEXTLEN. The outer allocation loops handle
	 * short allocation just fine, so it is safe to do this. We only want to
	 * do it when we are forced to, though, because it means more allocation
	 * operations are required.
	 */
	while (align_alen > MAXEXTLEN)
		align_alen -= extsz;
	ASSERT(align_alen <= MAXEXTLEN);

	/*
	 * If the previous block overlaps with this proposed allocation
	 * then move the start forward without adjusting the length.
	 */
	if (prevp->br_startoff != NULLFILEOFF) {
		if (prevp->br_startblock == HOLESTARTBLOCK)
			prevo = prevp->br_startoff;
		else
			prevo = prevp->br_startoff + prevp->br_blockcount;
	} else
		prevo = 0;
	if (align_off != orig_off && align_off < prevo)
		align_off = prevo;
	/*
	 * If the next block overlaps with this proposed allocation
	 * then move the start back without adjusting the length,
	 * but not before offset 0.
	 * This may of course make the start overlap previous block,
	 * and if we hit the offset 0 limit then the next block
	 * can still overlap too.
	 */
	if (!eof && gotp->br_startoff != NULLFILEOFF) {
		if ((delay && gotp->br_startblock == HOLESTARTBLOCK) ||
		    (!delay && gotp->br_startblock == DELAYSTARTBLOCK))
			nexto = gotp->br_startoff + gotp->br_blockcount;
		else
			nexto = gotp->br_startoff;
	} else
		nexto = NULLFILEOFF;
	if (!eof &&
	    align_off + align_alen != orig_end &&
	    align_off + align_alen > nexto)
		align_off = nexto > align_alen ? nexto - align_alen : 0;
	/*
	 * If we're now overlapping the next or previous extent that
	 * means we can't fit an extsz piece in this hole.  Just move
	 * the start forward to the first valid spot and set
	 * the length so we hit the end.
	 */
	if (align_off != orig_off && align_off < prevo)
		align_off = prevo;
	if (align_off + align_alen != orig_end &&
	    align_off + align_alen > nexto &&
	    nexto != NULLFILEOFF) {
		ASSERT(nexto > prevo);
		align_alen = nexto - align_off;
	}

	/*
	 * If realtime, and the result isn't a multiple of the realtime
	 * extent size we need to remove blocks until it is.
	 */
	if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) {
		/*
		 * We're not covering the original request, or
		 * we won't be able to once we fix the length.
		 */
		if (orig_off < align_off ||
		    orig_end > align_off + align_alen ||
		    align_alen - temp < orig_alen)
			return -EINVAL;
		/*
		 * Try to fix it by moving the start up.
		 */
		if (align_off + temp <= orig_off) {
			align_alen -= temp;
			align_off += temp;
		}
		/*
		 * Try to fix it by moving the end in.
		 */
		else if (align_off + align_alen - temp >= orig_end)
			align_alen -= temp;
		/*
		 * Set the start to the minimum then trim the length.
		 */
		else {
			align_alen -= orig_off - align_off;
			align_off = orig_off;
			align_alen -= align_alen % mp->m_sb.sb_rextsize;
		}
		/*
		 * Result doesn't cover the request, fail it.
		 */
		if (orig_off < align_off || orig_end > align_off + align_alen)
			return -EINVAL;
	} else {
		ASSERT(orig_off >= align_off);
		/* see MAXEXTLEN handling above */
		ASSERT(orig_end <= align_off + align_alen ||
		       align_alen + extsz > MAXEXTLEN);
	}

#ifdef DEBUG
	if (!eof && gotp->br_startoff != NULLFILEOFF)
		ASSERT(align_off + align_alen <= gotp->br_startoff);
	if (prevp->br_startoff != NULLFILEOFF)
		ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount);
#endif

	*lenp = align_alen;
	*offp = align_off;
	return 0;
}

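/*
 * Worked example (hypothetical values): with extsz = 16, a request for
 * 2 blocks at file offset 5 has temp = 5 % 16 = 5, so align_off drops to
 * 0 and align_alen grows to 7; the end is then rounded up by
 * 16 - (7 % 16) = 9 to a full 16-block unit covering offsets 0-15.  The
 * neighbor checks above may shrink the result again if the surrounding
 * hole cannot hold a fully aligned unit.
 */
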
#define XFS_ALLOC_GAP_UNITS	4

void
xfs_bmap_adjacent(
	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
{
	xfs_fsblock_t	adjust;		/* adjustment to block numbers */
	xfs_agnumber_t	fb_agno;	/* ag number of ap->firstblock */
	xfs_mount_t	*mp;		/* mount point structure */
	int		nullfb;		/* true if ap->firstblock isn't set */
	int		rt;		/* true if inode is realtime */

#define	ISVALID(x,y)	\
	(rt ? \
		(x) < mp->m_sb.sb_rblocks : \
		XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \
		XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \
		XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks)

	mp = ap->ip->i_mount;
	nullfb = ap->tp->t_firstblock == NULLFSBLOCK;
	rt = XFS_IS_REALTIME_INODE(ap->ip) &&
		(ap->datatype & XFS_ALLOC_USERDATA);
	fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp,
							ap->tp->t_firstblock);
	/*
	 * If allocating at eof, and there's a previous real block,
	 * try to use its last block as our starting point.
	 */
	if (ap->eof && ap->prev.br_startoff != NULLFILEOFF &&
	    !isnullstartblock(ap->prev.br_startblock) &&
	    ISVALID(ap->prev.br_startblock + ap->prev.br_blockcount,
		    ap->prev.br_startblock)) {
		ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount;
		/*
		 * Adjust for the gap between prevp and us.
		 */
		adjust = ap->offset -
			(ap->prev.br_startoff + ap->prev.br_blockcount);
		if (adjust &&
		    ISVALID(ap->blkno + adjust, ap->prev.br_startblock))
			ap->blkno += adjust;
	}
	/*
	 * If not at eof, then compare the two neighbor blocks.
	 * Figure out whether either one gives us a good starting point,
	 * and pick the better one.
	 */
	else if (!ap->eof) {
		xfs_fsblock_t	gotbno;		/* right side block number */
		xfs_fsblock_t	gotdiff = 0;	/* right side difference */
		xfs_fsblock_t	prevbno;	/* left side block number */
		xfs_fsblock_t	prevdiff = 0;	/* left side difference */

		/*
		 * If there's a previous (left) block, select a requested
		 * start block based on it.
		 */
		if (ap->prev.br_startoff != NULLFILEOFF &&
		    !isnullstartblock(ap->prev.br_startblock) &&
		    (prevbno = ap->prev.br_startblock +
			       ap->prev.br_blockcount) &&
		    ISVALID(prevbno, ap->prev.br_startblock)) {
			/*
			 * Calculate gap to end of previous block.
			 */
			adjust = prevdiff = ap->offset -
				(ap->prev.br_startoff +
				 ap->prev.br_blockcount);
			/*
			 * Figure the startblock based on the previous block's
			 * end and the gap size.
			 * Heuristic!
			 * If the gap is large relative to the piece we're
			 * allocating, or using it gives us an invalid block
			 * number, then just use the end of the previous block.
			 */
			if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
			    ISVALID(prevbno + prevdiff,
				    ap->prev.br_startblock))
				prevbno += adjust;
			else
				prevdiff += adjust;
			/*
			 * If the firstblock forbids it, can't use it,
			 * must use default.
			 */
			if (!rt && !nullfb &&
			    XFS_FSB_TO_AGNO(mp, prevbno) != fb_agno)
				prevbno = NULLFSBLOCK;
		}
		/*
		 * No previous block or can't follow it, just default.
		 */
		else
			prevbno = NULLFSBLOCK;
		/*
		 * If there's a following (right) block, select a requested
		 * start block based on it.
		 */
		if (!isnullstartblock(ap->got.br_startblock)) {
			/*
			 * Calculate gap to start of next block.
			 */
			adjust = gotdiff = ap->got.br_startoff - ap->offset;
			/*
			 * Figure the startblock based on the next block's
			 * start and the gap size.
			 */
			gotbno = ap->got.br_startblock;
			/*
			 * Heuristic!
			 * If the gap is large relative to the piece we're
			 * allocating, or using it gives us an invalid block
			 * number, then just use the start of the next block
			 * offset by our length.
			 */
			if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
			    ISVALID(gotbno - gotdiff, gotbno))
				gotbno -= adjust;
			else if (ISVALID(gotbno - ap->length, gotbno)) {
				gotbno -= ap->length;
				gotdiff += adjust - ap->length;
			} else
				gotdiff += adjust;
			/*
			 * If the firstblock forbids it, can't use it,
			 * must use default.
			 */
			if (!rt && !nullfb &&
			    XFS_FSB_TO_AGNO(mp, gotbno) != fb_agno)
				gotbno = NULLFSBLOCK;
		}
		/*
		 * No next block, just default.
		 */
		else
			gotbno = NULLFSBLOCK;
		/*
		 * If both valid, pick the better one, else the only good
		 * one, else ap->blkno is already set (to 0 or the inode block).
		 */
		if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK)
			ap->blkno = prevdiff <= gotdiff ? prevbno : gotbno;
		else if (prevbno != NULLFSBLOCK)
			ap->blkno = prevbno;
		else if (gotbno != NULLFSBLOCK)
			ap->blkno = gotbno;
	}
#undef ISVALID
}

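/*
 * For illustration of the XFS_ALLOC_GAP_UNITS heuristic (hypothetical
 * numbers): when allocating 8 blocks with a 10-block gap after the
 * previous extent's end, prevdiff = 10 <= 4 * 8, so we aim past the gap
 * at prevbno + 10 to keep the file's blocks in file-offset order.  Were
 * the gap 100 blocks instead, we would aim at the previous extent's end
 * and carry the gap into prevdiff when comparing against the right
 * neighbor's candidate.
 */
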
static int
xfs_bmap_longest_free_extent(
	struct xfs_trans	*tp,
	xfs_agnumber_t		ag,
	xfs_extlen_t		*blen,
	int			*notinit)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_perag	*pag;
	xfs_extlen_t		longest;
	int			error = 0;

	pag = xfs_perag_get(mp, ag);
	if (!pag->pagf_init) {
		error = xfs_alloc_pagf_init(mp, tp, ag, XFS_ALLOC_FLAG_TRYLOCK);
		if (error) {
			/* Couldn't lock the AGF, so skip this AG. */
			if (error == -EAGAIN) {
				*notinit = 1;
				error = 0;
			}
			goto out;
		}
	}

	longest = xfs_alloc_longest_free_extent(pag,
				xfs_alloc_min_freelist(mp, pag),
				xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE));
	if (*blen < longest)
		*blen = longest;

out:
	xfs_perag_put(pag);
	return error;
}

static void
xfs_bmap_select_minlen(
	struct xfs_bmalloca	*ap,
	struct xfs_alloc_arg	*args,
	xfs_extlen_t		*blen,
	int			notinit)
{
	if (notinit || *blen < ap->minlen) {
		/*
		 * Since we did a BUF_TRYLOCK above, it is possible that
		 * there is space for this request.
		 */
		args->minlen = ap->minlen;
	} else if (*blen < args->maxlen) {
		/*
		 * If the best seen length is less than the request length,
		 * use the best as the minimum.
		 */
		args->minlen = *blen;
	} else {
		/*
		 * Otherwise we've seen an extent as big as maxlen, use that
		 * as the minimum.
		 */
		args->minlen = args->maxlen;
	}
}

STATIC int
xfs_bmap_btalloc_nullfb(
	struct xfs_bmalloca	*ap,
	struct xfs_alloc_arg	*args,
	xfs_extlen_t		*blen)
{
	struct xfs_mount	*mp = ap->ip->i_mount;
	xfs_agnumber_t		ag, startag;
	int			notinit = 0;
	int			error;

	args->type = XFS_ALLOCTYPE_START_BNO;
	args->total = ap->total;

	startag = ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
	if (startag == NULLAGNUMBER)
		startag = ag = 0;

	while (*blen < args->maxlen) {
		error = xfs_bmap_longest_free_extent(args->tp, ag, blen,
						     &notinit);
		if (error)
			return error;

		if (++ag == mp->m_sb.sb_agcount)
			ag = 0;
		if (ag == startag)
			break;
	}

	xfs_bmap_select_minlen(ap, args, blen, notinit);
	return 0;
}

STATIC int
xfs_bmap_btalloc_filestreams(
	struct xfs_bmalloca	*ap,
	struct xfs_alloc_arg	*args,
	xfs_extlen_t		*blen)
{
	struct xfs_mount	*mp = ap->ip->i_mount;
	xfs_agnumber_t		ag;
	int			notinit = 0;
	int			error;

	args->type = XFS_ALLOCTYPE_NEAR_BNO;
	args->total = ap->total;

	ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
	if (ag == NULLAGNUMBER)
		ag = 0;

	error = xfs_bmap_longest_free_extent(args->tp, ag, blen, &notinit);
	if (error)
		return error;

	if (*blen < args->maxlen) {
		error = xfs_filestream_new_ag(ap, &ag);
		if (error)
			return error;

		error = xfs_bmap_longest_free_extent(args->tp, ag, blen,
						     &notinit);
		if (error)
			return error;
	}

	xfs_bmap_select_minlen(ap, args, blen, notinit);

	/*
	 * Set the failure fallback case to look in the selected AG as stream
	 * may have moved.
	 */
	ap->blkno = args->fsbno = XFS_AGB_TO_FSB(mp, ag, 0);
	return 0;
}

/* Update all inode and quota accounting for the allocation we just did. */
static void
xfs_bmap_btalloc_accounting(
	struct xfs_bmalloca	*ap,
	struct xfs_alloc_arg	*args)
{
	if (ap->flags & XFS_BMAPI_COWFORK) {
		/*
		 * COW fork blocks are in-core only and thus are treated as
		 * in-core quota reservation (like delalloc blocks) even when
		 * converted to real blocks. The quota reservation is not
		 * accounted to disk until blocks are remapped to the data
		 * fork. So if these blocks were previously delalloc, we
		 * already have quota reservation and there's nothing to do
		 * yet.
		 */
		if (ap->wasdel) {
			xfs_mod_delalloc(ap->ip->i_mount, -(int64_t)args->len);
			return;
		}

		/*
		 * Otherwise, we've allocated blocks in a hole. The transaction
		 * has acquired in-core quota reservation for this extent.
		 * Rather than account these as real blocks, however, we reduce
		 * the transaction quota reservation based on the allocation.
		 * This essentially transfers the transaction quota reservation
		 * to that of a delalloc extent.
		 */
		ap->ip->i_delayed_blks += args->len;
		xfs_trans_mod_dquot_byino(ap->tp, ap->ip, XFS_TRANS_DQ_RES_BLKS,
				-(long)args->len);
		return;
	}

	/* data/attr fork only */
	ap->ip->i_d.di_nblocks += args->len;
	xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
	if (ap->wasdel) {
		ap->ip->i_delayed_blks -= args->len;
		xfs_mod_delalloc(ap->ip->i_mount, -(int64_t)args->len);
	}
	xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
		ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT : XFS_TRANS_DQ_BCOUNT,
		args->len);
}
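
/*
 * In short: COW fork allocations stay accounted as in-core reservations
 * (delalloc-style), data/attr fork allocations are accounted on disk via
 * di_nblocks, and the wasdel cases move existing delalloc accounting over
 * to the allocated extent instead of reserving anew.
 */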

STATIC int
xfs_bmap_btalloc(
	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
{
	xfs_mount_t	*mp;		/* mount point structure */
	xfs_alloctype_t	atype = 0;	/* type for allocation routines */
	xfs_extlen_t	align = 0;	/* minimum allocation alignment */
	xfs_agnumber_t	fb_agno;	/* ag number of ap->firstblock */
	xfs_agnumber_t	ag;
	xfs_alloc_arg_t	args;
	xfs_fileoff_t	orig_offset;
	xfs_extlen_t	orig_length;
	xfs_extlen_t	blen;
	xfs_extlen_t	nextminlen = 0;
	int		nullfb;		/* true if ap->firstblock isn't set */
	int		isaligned;
	int		tryagain;
	int		error;
	int		stripe_align;

	ASSERT(ap->length);
	orig_offset = ap->offset;
	orig_length = ap->length;

	mp = ap->ip->i_mount;

	/* stripe alignment for allocation is determined by mount parameters */
	stripe_align = 0;
	if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
		stripe_align = mp->m_swidth;
	else if (mp->m_dalign)
		stripe_align = mp->m_dalign;

	if (ap->flags & XFS_BMAPI_COWFORK)
		align = xfs_get_cowextsz_hint(ap->ip);
	else if (ap->datatype & XFS_ALLOC_USERDATA)
		align = xfs_get_extsz_hint(ap->ip);
	if (align) {
		error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
						align, 0, ap->eof, 0, ap->conv,
						&ap->offset, &ap->length);
		ASSERT(!error);
		ASSERT(ap->length);
	}

	nullfb = ap->tp->t_firstblock == NULLFSBLOCK;
	fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp,
							ap->tp->t_firstblock);
	if (nullfb) {
		if ((ap->datatype & XFS_ALLOC_USERDATA) &&
		    xfs_inode_is_filestream(ap->ip)) {
			ag = xfs_filestream_lookup_ag(ap->ip);
			ag = (ag != NULLAGNUMBER) ? ag : 0;
			ap->blkno = XFS_AGB_TO_FSB(mp, ag, 0);
		} else {
			ap->blkno = XFS_INO_TO_FSB(mp, ap->ip->i_ino);
		}
	} else
		ap->blkno = ap->tp->t_firstblock;

	xfs_bmap_adjacent(ap);

	/*
	 * If allowed, use ap->blkno; otherwise must use firstblock since
	 * it's in the right allocation group.
	 */
	if (nullfb || XFS_FSB_TO_AGNO(mp, ap->blkno) == fb_agno)
		;
	else
		ap->blkno = ap->tp->t_firstblock;
	/*
	 * Normal allocation, done through xfs_alloc_vextent.
	 */
	tryagain = isaligned = 0;
	memset(&args, 0, sizeof(args));
	args.tp = ap->tp;
	args.mp = mp;
	args.fsbno = ap->blkno;
	args.oinfo = XFS_RMAP_OINFO_SKIP_UPDATE;

	/* Trim the allocation back to the maximum an AG can fit. */
	args.maxlen = min(ap->length, mp->m_ag_max_usable);
	blen = 0;
	if (nullfb) {
		/*
		 * Search for an allocation group with a single extent large
		 * enough for the request.  If one isn't found, then adjust
		 * the minimum allocation size to the largest space found.
		 */
		if ((ap->datatype & XFS_ALLOC_USERDATA) &&
		    xfs_inode_is_filestream(ap->ip))
			error = xfs_bmap_btalloc_filestreams(ap, &args, &blen);
		else
			error = xfs_bmap_btalloc_nullfb(ap, &args, &blen);
		if (error)
			return error;
	} else if (ap->tp->t_flags & XFS_TRANS_LOWMODE) {
		if (xfs_inode_is_filestream(ap->ip))
			args.type = XFS_ALLOCTYPE_FIRST_AG;
		else
			args.type = XFS_ALLOCTYPE_START_BNO;
		args.total = args.minlen = ap->minlen;
	} else {
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
		args.total = ap->total;
		args.minlen = ap->minlen;
	}
	/* apply extent size hints if obtained earlier */
	if (align) {
		args.prod = align;
		div_u64_rem(ap->offset, args.prod, &args.mod);
		if (args.mod)
			args.mod = args.prod - args.mod;
	} else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) {
		args.prod = 1;
		args.mod = 0;
	} else {
		args.prod = PAGE_SIZE >> mp->m_sb.sb_blocklog;
		div_u64_rem(ap->offset, args.prod, &args.mod);
		if (args.mod)
			args.mod = args.prod - args.mod;
	}
	/*
	 * If we are not low on available data blocks, and the underlying
	 * logical volume manager is a stripe, and the file offset is zero then
	 * try to allocate data blocks on stripe unit boundary. NOTE: ap->aeof
	 * is only set if the allocation length is >= the stripe unit and the
	 * allocation offset is at the end of file.
	 */
	if (!(ap->tp->t_flags & XFS_TRANS_LOWMODE) && ap->aeof) {
		if (!ap->offset) {
			args.alignment = stripe_align;
			atype = args.type;
			isaligned = 1;
			/*
			 * Adjust minlen to try and preserve alignment if we
			 * can't guarantee an aligned maxlen extent.
			 */
			if (blen > args.alignment &&
			    blen <= args.maxlen + args.alignment)
				args.minlen = blen - args.alignment;
			args.minalignslop = 0;
		} else {
			/*
			 * First try an exact bno allocation.
			 * If it fails then do a near or start bno
			 * allocation with alignment turned on.
			 */
			atype = args.type;
			tryagain = 1;
			args.type = XFS_ALLOCTYPE_THIS_BNO;
			args.alignment = 1;
			/*
			 * Compute the minlen+alignment for the
			 * next case.  Set slop so that the value
			 * of minlen+alignment+slop doesn't go up
			 * between the calls.
			 */
			if (blen > stripe_align && blen <= args.maxlen)
				nextminlen = blen - stripe_align;
			else
				nextminlen = args.minlen;
			if (nextminlen + stripe_align > args.minlen + 1)
				args.minalignslop =
					nextminlen + stripe_align -
					args.minlen - 1;
			else
				args.minalignslop = 0;
		}
	} else {
		args.alignment = 1;
		args.minalignslop = 0;
	}
	args.minleft = ap->minleft;
	args.wasdel = ap->wasdel;
	args.resv = XFS_AG_RESV_NONE;
	args.datatype = ap->datatype;

	error = xfs_alloc_vextent(&args);
	if (error)
		return error;

	if (tryagain && args.fsbno == NULLFSBLOCK) {
		/*
		 * Exact allocation failed. Now try with alignment
		 * turned on.
		 */
		args.type = atype;
		args.fsbno = ap->blkno;
		args.alignment = stripe_align;
		args.minlen = nextminlen;
		args.minalignslop = 0;
		isaligned = 1;
		if ((error = xfs_alloc_vextent(&args)))
			return error;
	}
	if (isaligned && args.fsbno == NULLFSBLOCK) {
		/*
		 * allocation failed, so turn off alignment and
		 * try again.
		 */
		args.type = atype;
		args.fsbno = ap->blkno;
		args.alignment = 0;
		if ((error = xfs_alloc_vextent(&args)))
			return error;
	}
	if (args.fsbno == NULLFSBLOCK && nullfb &&
	    args.minlen > ap->minlen) {
		args.minlen = ap->minlen;
		args.type = XFS_ALLOCTYPE_START_BNO;
		args.fsbno = ap->blkno;
		if ((error = xfs_alloc_vextent(&args)))
			return error;
	}
	if (args.fsbno == NULLFSBLOCK && nullfb) {
		args.fsbno = 0;
		args.type = XFS_ALLOCTYPE_FIRST_AG;
		args.total = ap->minlen;
		if ((error = xfs_alloc_vextent(&args)))
			return error;
		ap->tp->t_flags |= XFS_TRANS_LOWMODE;
	}
	if (args.fsbno != NULLFSBLOCK) {
		/*
		 * check the allocation happened at the same or higher AG than
		 * the first block that was allocated.
		 */
		ASSERT(ap->tp->t_firstblock == NULLFSBLOCK ||
		       XFS_FSB_TO_AGNO(mp, ap->tp->t_firstblock) <=
		       XFS_FSB_TO_AGNO(mp, args.fsbno));

		ap->blkno = args.fsbno;
		if (ap->tp->t_firstblock == NULLFSBLOCK)
			ap->tp->t_firstblock = args.fsbno;
		ASSERT(nullfb || fb_agno <= args.agno);
		ap->length = args.len;
		/*
		 * If the extent size hint is active, we tried to round the
		 * caller's allocation request offset down to extsz and the
		 * length up to another extsz boundary.  If we found a free
		 * extent we mapped it in starting at this new offset.  If the
		 * newly mapped space isn't long enough to cover any of the
		 * range of offsets that was originally requested, move the
		 * mapping up so that we can fill as much of the caller's
		 * original request as possible.  Free space is apparently
		 * very fragmented so we're unlikely to be able to satisfy the
		 * hints anyway.
		 */
		if (ap->length <= orig_length)
			ap->offset = orig_offset;
		else if (ap->offset + ap->length < orig_offset + orig_length)
			ap->offset = orig_offset + orig_length - ap->length;
		xfs_bmap_btalloc_accounting(ap, &args);
	} else {
		ap->blkno = NULLFSBLOCK;
		ap->length = 0;
	}
	return 0;
}

/* Trim extent to fit a logical block range. */
void
xfs_trim_extent(
	struct xfs_bmbt_irec	*irec,
	xfs_fileoff_t		bno,
	xfs_filblks_t		len)
{
	xfs_fileoff_t		distance;
	xfs_fileoff_t		end = bno + len;

	if (irec->br_startoff + irec->br_blockcount <= bno ||
	    irec->br_startoff >= end) {
		irec->br_blockcount = 0;
		return;
	}

	if (irec->br_startoff < bno) {
		distance = bno - irec->br_startoff;
		if (isnullstartblock(irec->br_startblock))
			irec->br_startblock = DELAYSTARTBLOCK;
		if (irec->br_startblock != DELAYSTARTBLOCK &&
		    irec->br_startblock != HOLESTARTBLOCK)
			irec->br_startblock += distance;
		irec->br_startoff += distance;
		irec->br_blockcount -= distance;
	}

	if (end < irec->br_startoff + irec->br_blockcount) {
		distance = irec->br_startoff + irec->br_blockcount - end;
		irec->br_blockcount -= distance;
	}
}
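
/*
 * Usage sketch (hypothetical values): trimming a mapping for blocks
 * 10-29 (br_startoff = 10, br_blockcount = 20) to the range bno = 15,
 * len = 10 advances br_startoff/br_startblock by 5 and leaves
 * br_blockcount = 10, i.e. exactly blocks 15-24.  Callers must check
 * br_blockcount for zero, which signals no overlap at all.
 */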

/*
 * Trim the returned map to the required bounds
 */
STATIC void
xfs_bmapi_trim_map(
	struct xfs_bmbt_irec	*mval,
	struct xfs_bmbt_irec	*got,
	xfs_fileoff_t		*bno,
	xfs_filblks_t		len,
	xfs_fileoff_t		obno,
	xfs_fileoff_t		end,
	int			n,
	int			flags)
{
	if ((flags & XFS_BMAPI_ENTIRE) ||
	    got->br_startoff + got->br_blockcount <= obno) {
		*mval = *got;
		if (isnullstartblock(got->br_startblock))
			mval->br_startblock = DELAYSTARTBLOCK;
		return;
	}

	if (obno > *bno)
		*bno = obno;
	ASSERT((*bno >= obno) || (n == 0));
	ASSERT(*bno < end);
	mval->br_startoff = *bno;
	if (isnullstartblock(got->br_startblock))
		mval->br_startblock = DELAYSTARTBLOCK;
	else
		mval->br_startblock = got->br_startblock +
					(*bno - got->br_startoff);
	/*
	 * Return the minimum of what we got and what we asked for as the
	 * length.  We can use the len variable here because it is
	 * modified below and we could have been there before coming
	 * here if the first part of the allocation didn't overlap what
	 * was asked for.
	 */
	mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno,
			got->br_blockcount - (*bno - got->br_startoff));
	mval->br_state = got->br_state;
	ASSERT(mval->br_blockcount <= len);
}

/*
 * Update and validate the extent map to return
 */
STATIC void
xfs_bmapi_update_map(
	struct xfs_bmbt_irec	**map,
	xfs_fileoff_t		*bno,
	xfs_filblks_t		*len,
	xfs_fileoff_t		obno,
	xfs_fileoff_t		end,
	int			*n,
	int			flags)
{
	xfs_bmbt_irec_t	*mval = *map;

	ASSERT((flags & XFS_BMAPI_ENTIRE) ||
	       ((mval->br_startoff + mval->br_blockcount) <= end));
	ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) ||
	       (mval->br_startoff < obno));

	*bno = mval->br_startoff + mval->br_blockcount;
	*len = end - *bno;
	if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) {
		/* update previous map with new information */
		ASSERT(mval->br_startblock == mval[-1].br_startblock);
		ASSERT(mval->br_blockcount > mval[-1].br_blockcount);
		ASSERT(mval->br_state == mval[-1].br_state);
		mval[-1].br_blockcount = mval->br_blockcount;
		mval[-1].br_state = mval->br_state;
	} else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK &&
		   mval[-1].br_startblock != DELAYSTARTBLOCK &&
		   mval[-1].br_startblock != HOLESTARTBLOCK &&
		   mval->br_startblock == mval[-1].br_startblock +
					  mval[-1].br_blockcount &&
		   mval[-1].br_state == mval->br_state) {
		ASSERT(mval->br_startoff ==
		       mval[-1].br_startoff + mval[-1].br_blockcount);
		mval[-1].br_blockcount += mval->br_blockcount;
	} else if (*n > 0 &&
		   mval->br_startblock == DELAYSTARTBLOCK &&
		   mval[-1].br_startblock == DELAYSTARTBLOCK &&
		   mval->br_startoff ==
		   mval[-1].br_startoff + mval[-1].br_blockcount) {
		mval[-1].br_blockcount += mval->br_blockcount;
		mval[-1].br_state = mval->br_state;
	} else if (!((*n == 0) &&
		     ((mval->br_startoff + mval->br_blockcount) <=
		      obno))) {
		mval++;
		(*n)++;
	}
	*map = mval;
}
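
/*
 * The three merge branches above mean a caller can receive fewer
 * mappings than internal iterations: e.g. (hypothetical) a hole-fill
 * that allocates two physically adjacent extents in one call comes
 * back as a single mval entry with the summed blockcount.
 */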

/*
 * Map file blocks to filesystem blocks without allocation.
 */
int
xfs_bmapi_read(
	struct xfs_inode	*ip,
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	struct xfs_bmbt_irec	*mval,
	int			*nmap,
	int			flags)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp;
	struct xfs_bmbt_irec	got;
	xfs_fileoff_t		obno;
	xfs_fileoff_t		end;
	struct xfs_iext_cursor	icur;
	int			error;
	bool			eof = false;
	int			n = 0;
	int			whichfork = xfs_bmapi_whichfork(flags);

	ASSERT(*nmap >= 1);
	ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK|XFS_BMAPI_ENTIRE|
			   XFS_BMAPI_COWFORK)));
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED|XFS_ILOCK_EXCL));

	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ip, whichfork)) ||
	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
		return -EFSCORRUPTED;
	}

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	XFS_STATS_INC(mp, xs_blk_mapr);

	ifp = XFS_IFORK_PTR(ip, whichfork);
	if (!ifp) {
		/* No CoW fork?  Return a hole. */
		if (whichfork == XFS_COW_FORK) {
			mval->br_startoff = bno;
			mval->br_startblock = HOLESTARTBLOCK;
			mval->br_blockcount = len;
			mval->br_state = XFS_EXT_NORM;
			*nmap = 1;
			return 0;
		}

		/*
		 * A missing attr ifork implies that the inode says we're in
		 * extents or btree format but failed to pass the inode fork
		 * verifier while trying to load it. Treat that as a file
		 * corruption too.
		 */
		xfs_alert(mp, "%s: inode %llu missing fork %d",
				__func__, ip->i_ino, whichfork);
		return -EFSCORRUPTED;
	}

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(NULL, ip, whichfork);
		if (error)
			return error;
	}

	if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got))
		eof = true;
	end = bno + len;
	obno = bno;

	while (bno < end && n < *nmap) {
		/* Reading past eof, act as though there's a hole up to end. */
		if (eof)
			got.br_startoff = end;
		if (got.br_startoff > bno) {
			/* Reading in a hole. */
			mval->br_startoff = bno;
			mval->br_startblock = HOLESTARTBLOCK;
			mval->br_blockcount =
				XFS_FILBLKS_MIN(len, got.br_startoff - bno);
			mval->br_state = XFS_EXT_NORM;
			bno += mval->br_blockcount;
			len -= mval->br_blockcount;
			mval++;
			n++;
			continue;
		}

		/* set up the extent map to return. */
		xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags);
		xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);

		/* If we're done, stop now. */
		if (bno >= end || n >= *nmap)
			break;

		/* Else go on to the next record. */
		if (!xfs_iext_next_extent(ifp, &icur, &got))
			eof = true;
	}
	*nmap = n;
	return 0;
}
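
/*
 * A typical lookup (sketch; error handling elided) maps one extent at
 * file offset zero:
 *
 *	struct xfs_bmbt_irec	irec;
 *	int			nimaps = 1;
 *
 *	xfs_ilock(ip, XFS_ILOCK_SHARED);
 *	error = xfs_bmapi_read(ip, 0, 1, &irec, &nimaps, 0);
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 *
 * A hole comes back as br_startblock == HOLESTARTBLOCK and delalloc
 * space as DELAYSTARTBLOCK, so callers must not treat br_startblock as
 * a disk address without checking.
 */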

/*
 * Add a delayed allocation extent to an inode. Blocks are reserved from the
 * global pool and the extent inserted into the inode in-core extent tree.
 *
 * On entry, got refers to the first extent beyond the offset of the extent to
 * allocate or eof is specified if no such extent exists. On return, got refers
 * to the extent record that was inserted to the inode fork.
 *
 * Note that the allocated extent may have been merged with contiguous extents
 * during insertion into the inode fork. Thus, got does not reflect the current
 * state of the inode fork on return. If necessary, the caller can use lastx to
 * look up the updated record in the inode fork.
 */
int
xfs_bmapi_reserve_delalloc(
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_fileoff_t		off,
	xfs_filblks_t		len,
	xfs_filblks_t		prealloc,
	struct xfs_bmbt_irec	*got,
	struct xfs_iext_cursor	*icur,
	int			eof)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	xfs_extlen_t		alen;
	xfs_extlen_t		indlen;
	int			error;
	xfs_fileoff_t		aoff = off;

	/*
	 * Cap the alloc length. Keep track of prealloc so we know whether to
	 * tag the inode before we return.
	 */
	alen = XFS_FILBLKS_MIN(len + prealloc, MAXEXTLEN);
	if (!eof)
		alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff);
	if (prealloc && alen >= len)
		prealloc = alen - len;

	/* Figure out the extent size, adjust alen */
	if (whichfork == XFS_COW_FORK) {
		struct xfs_bmbt_irec	prev;
		xfs_extlen_t		extsz = xfs_get_cowextsz_hint(ip);

		if (!xfs_iext_peek_prev_extent(ifp, icur, &prev))
			prev.br_startoff = NULLFILEOFF;

		error = xfs_bmap_extsize_align(mp, got, &prev, extsz, 0, eof,
					       1, 0, &aoff, &alen);
		ASSERT(!error);
	}

	/*
	 * Make a transaction-less quota reservation for delayed allocation
	 * blocks.  This number gets adjusted later.  We return if we haven't
	 * allocated blocks already inside this loop.
	 */
	error = xfs_trans_reserve_quota_nblks(NULL, ip, (long)alen, 0,
						XFS_QMOPT_RES_REGBLKS);
	if (error)
		return error;

	/*
	 * Split changing sb for alen and indlen since they could be coming
	 * from different places.
	 */
	indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen);
	ASSERT(indlen > 0);

	error = xfs_mod_fdblocks(mp, -((int64_t)alen), false);
	if (error)
		goto out_unreserve_quota;

	error = xfs_mod_fdblocks(mp, -((int64_t)indlen), false);
	if (error)
		goto out_unreserve_blocks;

	ip->i_delayed_blks += alen;
	xfs_mod_delalloc(ip->i_mount, alen + indlen);

	got->br_startoff = aoff;
	got->br_startblock = nullstartblock(indlen);
	got->br_blockcount = alen;
	got->br_state = XFS_EXT_NORM;

	xfs_bmap_add_extent_hole_delay(ip, whichfork, icur, got);

	/*
	 * Tag the inode if blocks were preallocated. Note that COW fork
	 * preallocation can occur at the start or end of the extent, even when
	 * prealloc == 0, so we must also check the aligned offset and length.
	 */
	if (whichfork == XFS_DATA_FORK && prealloc)
		xfs_inode_set_eofblocks_tag(ip);
	if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len))
		xfs_inode_set_cowblocks_tag(ip);

	return 0;

out_unreserve_blocks:
	xfs_mod_fdblocks(mp, alen, false);
out_unreserve_quota:
	if (XFS_IS_QUOTA_ON(mp))
		xfs_trans_unreserve_quota_nblks(NULL, ip, (long)alen, 0,
						XFS_QMOPT_RES_REGBLKS);
	return error;
}
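
/*
 * For illustration (hypothetical numbers): a 100-block delalloc request
 * reserves 100 + xfs_bmap_worst_indlen(ip, 100) blocks from the free
 * pool, with the indlen share encoded into br_startblock via
 * nullstartblock().  The surplus is given back when extents merge (see
 * xfs_bmap_add_extent_hole_delay above) or when the extent is converted
 * and the real btree cost is known.
 */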

static int
xfs_bmap_alloc_userdata(
	struct xfs_bmalloca	*bma)
{
	struct xfs_mount	*mp = bma->ip->i_mount;
	int			whichfork = xfs_bmapi_whichfork(bma->flags);
	int			error;

	/*
	 * Set the data type being allocated. For the data fork, the first data
	 * in the file is treated differently to all other allocations. For the
	 * attribute fork, we only need to ensure the allocated range is not on
	 * the busy list.
	 */
	bma->datatype = XFS_ALLOC_NOBUSY;
	if (whichfork == XFS_DATA_FORK) {
		bma->datatype |= XFS_ALLOC_USERDATA;
		if (bma->offset == 0)
			bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA;

		if (mp->m_dalign && bma->length >= mp->m_dalign) {
			error = xfs_bmap_isaeof(bma, whichfork);
			if (error)
				return error;
		}

		if (XFS_IS_REALTIME_INODE(bma->ip))
			return xfs_bmap_rtalloc(bma);
	}

	return xfs_bmap_btalloc(bma);
}

static int
xfs_bmapi_allocate(
	struct xfs_bmalloca	*bma)
{
	struct xfs_mount	*mp = bma->ip->i_mount;
	int			whichfork = xfs_bmapi_whichfork(bma->flags);
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(bma->ip, whichfork);
	int			tmp_logflags = 0;
	int			error;

	ASSERT(bma->length > 0);

	/*
	 * For the wasdelay case, we could also just allocate the stuff asked
	 * for in this bmap call but that wouldn't be as good.
	 */
	if (bma->wasdel) {
		bma->length = (xfs_extlen_t)bma->got.br_blockcount;
		bma->offset = bma->got.br_startoff;
		if (!xfs_iext_peek_prev_extent(ifp, &bma->icur, &bma->prev))
			bma->prev.br_startoff = NULLFILEOFF;
	} else {
		bma->length = XFS_FILBLKS_MIN(bma->length, MAXEXTLEN);
		if (!bma->eof)
			bma->length = XFS_FILBLKS_MIN(bma->length,
					bma->got.br_startoff - bma->offset);
	}

	if (bma->flags & XFS_BMAPI_CONTIG)
		bma->minlen = bma->length;
	else
		bma->minlen = 1;

	if (bma->flags & XFS_BMAPI_METADATA)
		error = xfs_bmap_btalloc(bma);
	else
		error = xfs_bmap_alloc_userdata(bma);
	if (error || bma->blkno == NULLFSBLOCK)
		return error;

	if (bma->flags & XFS_BMAPI_ZERO) {
		error = xfs_zero_extent(bma->ip, bma->blkno, bma->length);
		if (error)
			return error;
	}

	if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur)
		bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork);
	/*
	 * Bump the number of extents we've allocated
	 * in this call.
	 */
	bma->nallocs++;

	if (bma->cur)
		bma->cur->bc_private.b.flags =
			bma->wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;

	bma->got.br_startoff = bma->offset;
	bma->got.br_startblock = bma->blkno;
	bma->got.br_blockcount = bma->length;
	bma->got.br_state = XFS_EXT_NORM;

	/*
	 * In the data fork, a wasdelay extent has been initialized, so
	 * shouldn't be flagged as unwritten.
	 *
	 * For the cow fork, however, we convert delalloc reservations
	 * (extents allocated for speculative preallocation) to
	 * allocated unwritten extents, and only convert the unwritten
	 * extents to real extents when we're about to write the data.
	 */
	if ((!bma->wasdel || (bma->flags & XFS_BMAPI_COWFORK)) &&
	    (bma->flags & XFS_BMAPI_PREALLOC))
		bma->got.br_state = XFS_EXT_UNWRITTEN;

	if (bma->wasdel)
		error = xfs_bmap_add_extent_delay_real(bma, whichfork);
	else
		error = xfs_bmap_add_extent_hole_real(bma->tp, bma->ip,
				whichfork, &bma->icur, &bma->cur, &bma->got,
				&bma->logflags, bma->flags);

	bma->logflags |= tmp_logflags;
	if (error)
		return error;

	/*
	 * Update our extent pointer, given that xfs_bmap_add_extent_delay_real
	 * or xfs_bmap_add_extent_hole_real might have merged it into one of
	 * the neighbouring ones.
	 */
	xfs_iext_get_extent(ifp, &bma->icur, &bma->got);

	ASSERT(bma->got.br_startoff <= bma->offset);
	ASSERT(bma->got.br_startoff + bma->got.br_blockcount >=
	       bma->offset + bma->length);
	ASSERT(bma->got.br_state == XFS_EXT_NORM ||
	       bma->got.br_state == XFS_EXT_UNWRITTEN);
	return 0;
}

STATIC int
xfs_bmapi_convert_unwritten(
	struct xfs_bmalloca	*bma,
	struct xfs_bmbt_irec	*mval,
	xfs_filblks_t		len,
	int			flags)
{
	int			whichfork = xfs_bmapi_whichfork(flags);
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(bma->ip, whichfork);
	int			tmp_logflags = 0;
	int			error;

	/* check if we need to do unwritten->real conversion */
	if (mval->br_state == XFS_EXT_UNWRITTEN &&
	    (flags & XFS_BMAPI_PREALLOC))
		return 0;

	/* check if we need to do real->unwritten conversion */
	if (mval->br_state == XFS_EXT_NORM &&
	    (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) !=
			(XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT))
		return 0;

	/*
	 * Modify (by adding) the state flag, if writing.
	 */
	ASSERT(mval->br_blockcount <= len);
	if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) {
		bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp,
					bma->ip, whichfork);
	}
	mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN)
				? XFS_EXT_NORM : XFS_EXT_UNWRITTEN;

	/*
	 * Before insertion into the bmbt, zero the range being converted
	 * if required.
	 */
	if (flags & XFS_BMAPI_ZERO) {
		error = xfs_zero_extent(bma->ip, mval->br_startblock,
					mval->br_blockcount);
		if (error)
			return error;
	}

	error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, whichfork,
			&bma->icur, &bma->cur, mval, &tmp_logflags);
	/*
	 * Log the inode core unconditionally in the unwritten extent conversion
	 * path because the conversion might not have done so (e.g., if the
	 * extent count hasn't changed). We need to make sure the inode is dirty
	 * in the transaction for the sake of fsync(), even if nothing has
	 * changed, because fsync() will not force the log for this transaction
	 * unless it sees the inode pinned.
	 *
	 * Note: If we're only converting cow fork extents, there aren't
	 * any on-disk updates to make, so we don't need to log anything.
	 */
	if (whichfork != XFS_COW_FORK)
		bma->logflags |= tmp_logflags | XFS_ILOG_CORE;
	if (error)
		return error;

	/*
	 * Update our extent pointer, given that
	 * xfs_bmap_add_extent_unwritten_real might have merged it into one
	 * of the neighbouring ones.
	 */
	xfs_iext_get_extent(ifp, &bma->icur, &bma->got);

	/*
	 * We may have combined previously unwritten space with written space,
	 * so generate another request.
	 */
	if (mval->br_blockcount < len)
		return -EAGAIN;
	return 0;
}

static inline xfs_extlen_t
xfs_bmapi_minleft(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			fork)
{
	if (tp && tp->t_firstblock != NULLFSBLOCK)
		return 0;
	if (XFS_IFORK_FORMAT(ip, fork) != XFS_DINODE_FMT_BTREE)
		return 1;
	return be16_to_cpu(XFS_IFORK_PTR(ip, fork)->if_broot->bb_level) + 1;
}

/*
 * Log whatever the flags say, even if error.  Otherwise we might miss detecting
 * a case where the data is changed, there's an error, and it's not logged so we
 * don't shutdown when we should.  Don't bother logging extents/btree changes if
 * we converted to the other format.
 */
static void
xfs_bmapi_finish(
	struct xfs_bmalloca	*bma,
	int			whichfork,
	int			error)
{
	if ((bma->logflags & xfs_ilog_fext(whichfork)) &&
	    XFS_IFORK_FORMAT(bma->ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
		bma->logflags &= ~xfs_ilog_fext(whichfork);
	else if ((bma->logflags & xfs_ilog_fbroot(whichfork)) &&
		 XFS_IFORK_FORMAT(bma->ip, whichfork) != XFS_DINODE_FMT_BTREE)
		bma->logflags &= ~xfs_ilog_fbroot(whichfork);

	if (bma->logflags)
		xfs_trans_log_inode(bma->tp, bma->ip, bma->logflags);
	if (bma->cur)
		xfs_btree_del_cursor(bma->cur, error);
}

/*
 * Map file blocks to filesystem blocks, and allocate blocks or convert the
 * extent state if necessary.  Detailed behaviour is controlled by the flags
 * parameter.  Only allocates blocks from a single allocation group, to avoid
 * locking problems.
 */
int
xfs_bmapi_write(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_fileoff_t		bno,		/* starting file offs. mapped */
	xfs_filblks_t		len,		/* length to map in file */
	int			flags,		/* XFS_BMAPI_... */
	xfs_extlen_t		total,		/* total blocks needed */
	struct xfs_bmbt_irec	*mval,		/* output: map values */
	int			*nmap)		/* i/o: mval size/count */
{
	struct xfs_bmalloca	bma = {
		.tp		= tp,
		.ip		= ip,
		.total		= total,
	};
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp;
	xfs_fileoff_t		end;		/* end of mapped file region */
	bool			eof = false;	/* after the end of extents */
	int			error;		/* error return */
	int			n;		/* current extent index */
	xfs_fileoff_t		obno;		/* old block number (offset) */
	int			whichfork;	/* data or attr fork */

#ifdef DEBUG
	xfs_fileoff_t		orig_bno;	/* original block number value */
	int			orig_flags;	/* original flags arg value */
	xfs_filblks_t		orig_len;	/* original value of len arg */
	struct xfs_bmbt_irec	*orig_mval;	/* original value of mval */
	int			orig_nmap;	/* original value of *nmap */

	orig_bno = bno;
	orig_len = len;
	orig_flags = flags;
	orig_mval = mval;
	orig_nmap = *nmap;
#endif
	whichfork = xfs_bmapi_whichfork(flags);

	ASSERT(*nmap >= 1);
	ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
	ASSERT(tp != NULL);
	ASSERT(len > 0);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL);
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(!(flags & XFS_BMAPI_REMAP));

	/* zeroing is currently only for data extents, not metadata */
	ASSERT((flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)) !=
			(XFS_BMAPI_METADATA | XFS_BMAPI_ZERO));
	/*
	 * we can allocate unwritten extents or pre-zero allocated blocks,
	 * but it makes no sense to do both at once. This would result in
	 * zeroing the unwritten extent twice, but it still being an
	 * unwritten extent....
	 */
	ASSERT((flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)) !=
			(XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO));

	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ip, whichfork)) ||
	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
		return -EFSCORRUPTED;
	}

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	ifp = XFS_IFORK_PTR(ip, whichfork);

	XFS_STATS_INC(mp, xs_blk_mapw);

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			goto error0;
	}

	if (!xfs_iext_lookup_extent(ip, ifp, bno, &bma.icur, &bma.got))
		eof = true;
	if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
		bma.prev.br_startoff = NULLFILEOFF;
	bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);

	n = 0;
	end = bno + len;
	obno = bno;
4447 while (bno < end && n < *nmap) {
4448 bool need_alloc = false, wasdelay = false;
4450 /* in hole or beyond EOF? */
4451 if (eof || bma.got.br_startoff > bno) {
4453 * CoW fork conversions should /never/ hit EOF or
4454 * holes. There should always be something for us
4457 ASSERT(!((flags & XFS_BMAPI_CONVERT) &&
4458 (flags & XFS_BMAPI_COWFORK)));
4461 } else if (isnullstartblock(bma.got.br_startblock)) {
4466 * First, deal with the hole before the allocated space
4467 * that we found, if any.
4469 if (need_alloc || wasdelay) {
4471 bma.conv = !!(flags & XFS_BMAPI_CONVERT);
4472 bma.wasdel = wasdelay;
4477 * There's a 32/64 bit type mismatch between the
4478 * allocation length request (which can be 64 bits in
4479 * length) and the bma length request, which is
4480 * xfs_extlen_t and therefore 32 bits. Hence we have to
4481 * check for 32-bit overflows and handle them here.
4483 if (len > (xfs_filblks_t)MAXEXTLEN)
4484 bma.length = MAXEXTLEN;
4489 ASSERT(bma.length > 0);
4490 error = xfs_bmapi_allocate(&bma);
4493 if (bma.blkno == NULLFSBLOCK)
4497 * If this is a CoW allocation, record the data in
4498 * the refcount btree for orphan recovery.
4500 if (whichfork == XFS_COW_FORK)
4501 xfs_refcount_alloc_cow_extent(tp, bma.blkno,
4505 /* Deal with the allocated space we found. */
4506 xfs_bmapi_trim_map(mval, &bma.got, &bno, len, obno,
4509 /* Execute unwritten extent conversion if necessary */
4510 error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags);
4511 if (error == -EAGAIN)
4516 /* update the extent map to return */
4517 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
4520 * If we're done, stop now. Stop when we've allocated
4521 * XFS_BMAP_MAX_NMAP extents no matter what. Otherwise
4522 * the transaction may get too big.
4524 if (bno >= end || n >= *nmap || bma.nallocs >= *nmap)
4527 /* Else go on to the next record. */
4529 if (!xfs_iext_next_extent(ifp, &bma.icur, &bma.got))
4534 error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags,
4539 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE ||
4540 XFS_IFORK_NEXTENTS(ip, whichfork) >
4541 XFS_IFORK_MAXEXT(ip, whichfork));
4542 xfs_bmapi_finish(&bma, whichfork, 0);
4543 xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
4547 xfs_bmapi_finish(&bma, whichfork, error);
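
/*
 * Illustrative sketch, not part of the original file: a minimal caller of
 * xfs_bmapi_write() for the data fork. The helper name is hypothetical;
 * real callers (iomap, attr, rtalloc) also set up quota reservations and
 * pass real allocation flags. It assumes @tp carries enough block
 * reservation and that the caller holds the ILOCK and has joined @ip.
 */
static int __maybe_unused
xfs_example_alloc_blocks(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		bno,
	xfs_filblks_t		len)
{
	struct xfs_bmbt_irec	mval[XFS_BMAP_MAX_NMAP];
	int			nmap = XFS_BMAP_MAX_NMAP;
	int			error;

	/* Map or allocate @len blocks at file offset @bno in the data fork. */
	error = xfs_bmapi_write(tp, ip, bno, len, 0, 0, mval, &nmap);
	if (error)
		return error;

	/* On success, mval[0..nmap-1] describe the mappings we now hold. */
	ASSERT(nmap >= 1);
	return 0;
}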
4552 * Convert an existing delalloc extent to real blocks based on file offset. This
4553 * attempts to allocate the entire delalloc extent and may require multiple
4554 * invocations to allocate the target offset if a large enough physical extent is not available.
4558 xfs_bmapi_convert_delalloc(
4559 struct xfs_inode *ip,
4562 struct iomap *iomap,
4565 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
4566 struct xfs_mount *mp = ip->i_mount;
4567 xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
4568 struct xfs_bmalloca bma = { NULL };
4570 struct xfs_trans *tp;
4573 if (whichfork == XFS_COW_FORK)
4574 flags |= IOMAP_F_SHARED;
4577 * Space for the extent and indirect blocks was reserved when the
4578 * delalloc extent was created so there's no need to do so here.
4580 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0,
4581 XFS_TRANS_RESERVE, &tp);
4585 xfs_ilock(ip, XFS_ILOCK_EXCL);
4586 xfs_trans_ijoin(tp, ip, 0);
4588 if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &bma.icur, &bma.got) ||
4589 bma.got.br_startoff > offset_fsb) {
4591 * No extent found in the range we are trying to convert. This
4592 * should only happen for the COW fork, where another thread
4593 * might have moved the extent to the data fork in the meantime.
4595 WARN_ON_ONCE(whichfork != XFS_COW_FORK);
4597 goto out_trans_cancel;
4601 * If we find a real extent here we raced with another thread converting
4602 * the extent. Just return the real extent at this offset.
4604 if (!isnullstartblock(bma.got.br_startblock)) {
4605 xfs_bmbt_to_iomap(ip, iomap, &bma.got, flags);
4606 *seq = READ_ONCE(ifp->if_seq);
4607 goto out_trans_cancel;
4613 bma.offset = bma.got.br_startoff;
4614 bma.length = max_t(xfs_filblks_t, bma.got.br_blockcount, MAXEXTLEN);
4615 bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);
4616 if (whichfork == XFS_COW_FORK)
4617 bma.flags = XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC;
4619 if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
4620 bma.prev.br_startoff = NULLFILEOFF;
4622 error = xfs_bmapi_allocate(&bma);
4627 if (WARN_ON_ONCE(bma.blkno == NULLFSBLOCK))
4629 error = -EFSCORRUPTED;
4630 if (WARN_ON_ONCE(!xfs_valid_startblock(ip, bma.got.br_startblock)))
4633 XFS_STATS_ADD(mp, xs_xstrat_bytes, XFS_FSB_TO_B(mp, bma.length));
4634 XFS_STATS_INC(mp, xs_xstrat_quick);
4636 ASSERT(!isnullstartblock(bma.got.br_startblock));
4637 xfs_bmbt_to_iomap(ip, iomap, &bma.got, flags);
4638 *seq = READ_ONCE(ifp->if_seq);
4640 if (whichfork == XFS_COW_FORK)
4641 xfs_refcount_alloc_cow_extent(tp, bma.blkno, bma.length);
4643 error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags,
4648 xfs_bmapi_finish(&bma, whichfork, 0);
4649 error = xfs_trans_commit(tp);
4650 xfs_iunlock(ip, XFS_ILOCK_EXCL);
4654 xfs_bmapi_finish(&bma, whichfork, error);
4656 xfs_trans_cancel(tp);
4657 xfs_iunlock(ip, XFS_ILOCK_EXCL);
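
/*
 * Illustrative sketch, not part of the original file: driving
 * xfs_bmapi_convert_delalloc() until the block backing @offset has been
 * converted, modelled loosely on the writeback caller. The helper name is
 * hypothetical; the real caller also revalidates the mapping against @seq.
 */
static int __maybe_unused
xfs_example_convert_delalloc(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	struct iomap		*iomap,
	unsigned int		*seq)
{
	int			error;

	/*
	 * Each call allocates from the start of the delalloc extent, so it
	 * can take several calls before @offset itself is covered.
	 */
	do {
		error = xfs_bmapi_convert_delalloc(ip, XFS_DATA_FORK, offset,
				iomap, seq);
		if (error)
			return error;
	} while (iomap->offset + iomap->length <= offset);

	return 0;
}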
4663 struct xfs_trans *tp,
4664 struct xfs_inode *ip,
4667 xfs_fsblock_t startblock,
4670 struct xfs_mount *mp = ip->i_mount;
4671 struct xfs_ifork *ifp;
4672 struct xfs_btree_cur *cur = NULL;
4673 struct xfs_bmbt_irec got;
4674 struct xfs_iext_cursor icur;
4675 int whichfork = xfs_bmapi_whichfork(flags);
4676 int logflags = 0, error;
4678 ifp = XFS_IFORK_PTR(ip, whichfork);
4680 ASSERT(len <= (xfs_filblks_t)MAXEXTLEN);
4681 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
4682 ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC |
4683 XFS_BMAPI_NORMAP)));
4684 ASSERT((flags & (XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC)) !=
4685 (XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC));
4687 if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ip, whichfork)) ||
4688 XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
4689 return -EFSCORRUPTED;
4692 if (XFS_FORCED_SHUTDOWN(mp))
4695 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
4696 error = xfs_iread_extents(tp, ip, whichfork);
4701 if (xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
4702 /* make sure we only reflink into a hole. */
4703 ASSERT(got.br_startoff > bno);
4704 ASSERT(got.br_startoff - bno >= len);
4707 ip->i_d.di_nblocks += len;
4708 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
4710 if (ifp->if_flags & XFS_IFBROOT) {
4711 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
4712 cur->bc_private.b.flags = 0;
4715 got.br_startoff = bno;
4716 got.br_startblock = startblock;
4717 got.br_blockcount = len;
4718 if (flags & XFS_BMAPI_PREALLOC)
4719 got.br_state = XFS_EXT_UNWRITTEN;
4721 got.br_state = XFS_EXT_NORM;
4723 error = xfs_bmap_add_extent_hole_real(tp, ip, whichfork, &icur,
4724 &cur, &got, &logflags, flags);
4728 error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags, whichfork);
4731 if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS)
4732 logflags &= ~XFS_ILOG_DEXT;
4733 else if (ip->i_d.di_format != XFS_DINODE_FMT_BTREE)
4734 logflags &= ~XFS_ILOG_DBROOT;
4737 xfs_trans_log_inode(tp, ip, logflags);
4739 xfs_btree_del_cursor(cur, error);
4744 * When a delalloc extent is split (e.g., due to a hole punch), the original
4745 * indlen reservation must be shared across the two new extents that are left behind.
4748 * Given the original reservation and the worst case indlen for the two new
4749 * extents (as calculated by xfs_bmap_worst_indlen()), split the original
4750 * reservation fairly across the two new extents. If necessary, steal available
4751 * blocks from a deleted extent to make up a reservation deficiency (e.g., if
4752 * ores == 1). The number of stolen blocks is returned. The availability and
4753 * subsequent accounting of stolen blocks is the responsibility of the caller.
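
/*
 * Worked example (illustrative, not from the original source): suppose
 * ores = 10, the worst-case indlens are 8 and 4 (nres = 12), and there is
 * nothing to steal. Then resfactor = 10 * 100 / 12 = 83, which scales the
 * requests down to len1 = 8 * 83 / 100 = 6 and len2 = 4 * 83 / 100 = 3.
 * The single remaining block (10 - 9) is handed out by the distribution
 * loop in the function below, for a final split of 7 and 3 blocks.
 */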
4755 static xfs_filblks_t
4756 xfs_bmap_split_indlen(
4757 xfs_filblks_t ores, /* original res. */
4758 xfs_filblks_t *indlen1, /* ext1 worst indlen */
4759 xfs_filblks_t *indlen2, /* ext2 worst indlen */
4760 xfs_filblks_t avail) /* stealable blocks */
4762 xfs_filblks_t len1 = *indlen1;
4763 xfs_filblks_t len2 = *indlen2;
4764 xfs_filblks_t nres = len1 + len2; /* new total res. */
4765 xfs_filblks_t stolen = 0;
4766 xfs_filblks_t resfactor;
4769 * Steal as many blocks as we can to try and satisfy the worst case
4770 * indlen for both new extents.
4772 if (ores < nres && avail)
4773 stolen = XFS_FILBLKS_MIN(nres - ores, avail);
4776 /* nothing else to do if we've satisfied the new reservation */
4781 * We can't meet the total required reservation for the two extents.
4782 * Calculate the percent of the overall shortage between both extents
4783 * and apply this percentage to each of the requested indlen values.
4784 * This distributes the shortage fairly and reduces the chances that one
4785 * of the two extents is left with nothing when extents are repeatedly
4788 resfactor = (ores * 100);
4789 do_div(resfactor, nres);
4794 ASSERT(len1 + len2 <= ores);
4795 ASSERT(len1 < *indlen1 && len2 < *indlen2);
4798 * Hand out the remainder to each extent. If one of the two reservations
4799 * is zero, we want to make sure that one gets a block first. The loop
4800 * below starts with len1, so hand len2 a block right off the bat if it is zero.
4803 ores -= (len1 + len2);
4804 ASSERT((*indlen1 - len1) + (*indlen2 - len2) >= ores);
4805 if (ores && !len2 && *indlen2) {
4810 if (len1 < *indlen1) {
4816 if (len2 < *indlen2) {
4829 xfs_bmap_del_extent_delay(
4830 struct xfs_inode *ip,
4832 struct xfs_iext_cursor *icur,
4833 struct xfs_bmbt_irec *got,
4834 struct xfs_bmbt_irec *del)
4836 struct xfs_mount *mp = ip->i_mount;
4837 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
4838 struct xfs_bmbt_irec new;
4839 int64_t da_old, da_new, da_diff = 0;
4840 xfs_fileoff_t del_endoff, got_endoff;
4841 xfs_filblks_t got_indlen, new_indlen, stolen;
4842 int state = xfs_bmap_fork_to_state(whichfork);
4846 XFS_STATS_INC(mp, xs_del_exlist);
4848 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
4849 del_endoff = del->br_startoff + del->br_blockcount;
4850 got_endoff = got->br_startoff + got->br_blockcount;
4851 da_old = startblockval(got->br_startblock);
4854 ASSERT(del->br_blockcount > 0);
4855 ASSERT(got->br_startoff <= del->br_startoff);
4856 ASSERT(got_endoff >= del_endoff);
4859 uint64_t rtexts = XFS_FSB_TO_B(mp, del->br_blockcount);
4861 do_div(rtexts, mp->m_sb.sb_rextsize);
4862 xfs_mod_frextents(mp, rtexts);
4866 * Update the inode delalloc counter now and wait to update the
4867 * sb counters as we might have to borrow some blocks for the
4868 * indirect block accounting.
4870 error = xfs_trans_reserve_quota_nblks(NULL, ip,
4871 -((long)del->br_blockcount), 0,
4872 isrt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
4875 ip->i_delayed_blks -= del->br_blockcount;
4877 if (got->br_startoff == del->br_startoff)
4878 state |= BMAP_LEFT_FILLING;
4879 if (got_endoff == del_endoff)
4880 state |= BMAP_RIGHT_FILLING;
4882 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
4883 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
4885 * Matches the whole extent. Delete the entry.
4887 xfs_iext_remove(ip, icur, state);
4888 xfs_iext_prev(ifp, icur);
4890 case BMAP_LEFT_FILLING:
4892 * Deleting the first part of the extent.
4894 got->br_startoff = del_endoff;
4895 got->br_blockcount -= del->br_blockcount;
4896 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
4897 got->br_blockcount), da_old);
4898 got->br_startblock = nullstartblock((int)da_new);
4899 xfs_iext_update_extent(ip, state, icur, got);
4901 case BMAP_RIGHT_FILLING:
4903 * Deleting the last part of the extent.
4905 got->br_blockcount = got->br_blockcount - del->br_blockcount;
4906 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
4907 got->br_blockcount), da_old);
4908 got->br_startblock = nullstartblock((int)da_new);
4909 xfs_iext_update_extent(ip, state, icur, got);
4913 * Deleting the middle of the extent.
4915 * Distribute the original indlen reservation across the two new
4916 * extents. Steal blocks from the deleted extent if necessary.
4917 * Stealing blocks simply fudges the fdblocks accounting below.
4918 * Warn if either of the new indlen reservations is zero as this
4919 * can lead to delalloc problems.
4921 got->br_blockcount = del->br_startoff - got->br_startoff;
4922 got_indlen = xfs_bmap_worst_indlen(ip, got->br_blockcount);
4924 new.br_blockcount = got_endoff - del_endoff;
4925 new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount);
4927 WARN_ON_ONCE(!got_indlen || !new_indlen);
4928 stolen = xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen,
4929 del->br_blockcount);
4931 got->br_startblock = nullstartblock((int)got_indlen);
4933 new.br_startoff = del_endoff;
4934 new.br_state = got->br_state;
4935 new.br_startblock = nullstartblock((int)new_indlen);
4937 xfs_iext_update_extent(ip, state, icur, got);
4938 xfs_iext_next(ifp, icur);
4939 xfs_iext_insert(ip, icur, &new, state);
4941 da_new = got_indlen + new_indlen - stolen;
4942 del->br_blockcount -= stolen;
4946 ASSERT(da_old >= da_new);
4947 da_diff = da_old - da_new;
4949 da_diff += del->br_blockcount;
4951 xfs_mod_fdblocks(mp, da_diff, false);
4952 xfs_mod_delalloc(mp, -da_diff);
4958 xfs_bmap_del_extent_cow(
4959 struct xfs_inode *ip,
4960 struct xfs_iext_cursor *icur,
4961 struct xfs_bmbt_irec *got,
4962 struct xfs_bmbt_irec *del)
4964 struct xfs_mount *mp = ip->i_mount;
4965 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
4966 struct xfs_bmbt_irec new;
4967 xfs_fileoff_t del_endoff, got_endoff;
4968 int state = BMAP_COWFORK;
4970 XFS_STATS_INC(mp, xs_del_exlist);
4972 del_endoff = del->br_startoff + del->br_blockcount;
4973 got_endoff = got->br_startoff + got->br_blockcount;
4975 ASSERT(del->br_blockcount > 0);
4976 ASSERT(got->br_startoff <= del->br_startoff);
4977 ASSERT(got_endoff >= del_endoff);
4978 ASSERT(!isnullstartblock(got->br_startblock));
4980 if (got->br_startoff == del->br_startoff)
4981 state |= BMAP_LEFT_FILLING;
4982 if (got_endoff == del_endoff)
4983 state |= BMAP_RIGHT_FILLING;
4985 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
4986 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
4988 * Matches the whole extent. Delete the entry.
4990 xfs_iext_remove(ip, icur, state);
4991 xfs_iext_prev(ifp, icur);
4993 case BMAP_LEFT_FILLING:
4995 * Deleting the first part of the extent.
4997 got->br_startoff = del_endoff;
4998 got->br_blockcount -= del->br_blockcount;
4999 got->br_startblock = del->br_startblock + del->br_blockcount;
5000 xfs_iext_update_extent(ip, state, icur, got);
5002 case BMAP_RIGHT_FILLING:
5004 * Deleting the last part of the extent.
5006 got->br_blockcount -= del->br_blockcount;
5007 xfs_iext_update_extent(ip, state, icur, got);
5011 * Deleting the middle of the extent.
5013 got->br_blockcount = del->br_startoff - got->br_startoff;
5015 new.br_startoff = del_endoff;
5016 new.br_blockcount = got_endoff - del_endoff;
5017 new.br_state = got->br_state;
5018 new.br_startblock = del->br_startblock + del->br_blockcount;
5020 xfs_iext_update_extent(ip, state, icur, got);
5021 xfs_iext_next(ifp, icur);
5022 xfs_iext_insert(ip, icur, &new, state);
5025 ip->i_delayed_blks -= del->br_blockcount;
5029 * Called by xfs_bmapi to update file extent records and the btree
5030 * after removing space.
5032 STATIC int /* error */
5033 xfs_bmap_del_extent_real(
5034 xfs_inode_t *ip, /* incore inode pointer */
5035 xfs_trans_t *tp, /* current transaction pointer */
5036 struct xfs_iext_cursor *icur,
5037 xfs_btree_cur_t *cur, /* if null, not a btree */
5038 xfs_bmbt_irec_t *del, /* data to remove from extents */
5039 int *logflagsp, /* inode logging flags */
5040 int whichfork, /* data or attr fork */
5041 int bflags) /* bmapi flags */
5043 xfs_fsblock_t del_endblock = 0; /* first block past del */
5044 xfs_fileoff_t del_endoff; /* first offset past del */
5045 int do_fx; /* free extent at end of routine */
5046 int error; /* error return value */
5047 int flags = 0; /* inode logging flags */
5048 struct xfs_bmbt_irec got; /* current extent entry */
5049 xfs_fileoff_t got_endoff; /* first offset past got */
5050 int i; /* temp state */
5051 struct xfs_ifork *ifp; /* inode fork pointer */
5052 xfs_mount_t *mp; /* mount structure */
5053 xfs_filblks_t nblks; /* quota/sb block count */
5054 xfs_bmbt_irec_t new; /* new record to be inserted */
5056 uint qfield; /* quota field to update */
5057 int state = xfs_bmap_fork_to_state(whichfork);
5058 struct xfs_bmbt_irec old;
5061 XFS_STATS_INC(mp, xs_del_exlist);
5063 ifp = XFS_IFORK_PTR(ip, whichfork);
5064 ASSERT(del->br_blockcount > 0);
5065 xfs_iext_get_extent(ifp, icur, &got);
5066 ASSERT(got.br_startoff <= del->br_startoff);
5067 del_endoff = del->br_startoff + del->br_blockcount;
5068 got_endoff = got.br_startoff + got.br_blockcount;
5069 ASSERT(got_endoff >= del_endoff);
5070 ASSERT(!isnullstartblock(got.br_startblock));
5075 * If the directory code is running with no block reservation, the
5076 * deleted block is in the middle of its extent, and the resulting
5077 * insert of an extent would cause transformation to btree format,
5078 * then reject it. The calling code will then swap blocks around
5079 * instead. We have to do this now, rather than waiting for the
5080 * conversion to btree format, since the transaction will be dirty then.
5082 if (tp->t_blk_res == 0 &&
5083 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
5084 XFS_IFORK_NEXTENTS(ip, whichfork) >=
5085 XFS_IFORK_MAXEXT(ip, whichfork) &&
5086 del->br_startoff > got.br_startoff && del_endoff < got_endoff)
5089 flags = XFS_ILOG_CORE;
5090 if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) {
5095 bno = div_u64_rem(del->br_startblock, mp->m_sb.sb_rextsize,
5098 len = div_u64_rem(del->br_blockcount, mp->m_sb.sb_rextsize,
5102 error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len);
5106 nblks = len * mp->m_sb.sb_rextsize;
5107 qfield = XFS_TRANS_DQ_RTBCOUNT;
5110 nblks = del->br_blockcount;
5111 qfield = XFS_TRANS_DQ_BCOUNT;
5114 del_endblock = del->br_startblock + del->br_blockcount;
5116 error = xfs_bmbt_lookup_eq(cur, &got, &i);
5119 if (XFS_IS_CORRUPT(mp, i != 1)) {
5120 error = -EFSCORRUPTED;
5125 if (got.br_startoff == del->br_startoff)
5126 state |= BMAP_LEFT_FILLING;
5127 if (got_endoff == del_endoff)
5128 state |= BMAP_RIGHT_FILLING;
5130 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
5131 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
5133 * Matches the whole extent. Delete the entry.
5135 xfs_iext_remove(ip, icur, state);
5136 xfs_iext_prev(ifp, icur);
5137 XFS_IFORK_NEXT_SET(ip, whichfork,
5138 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
5139 flags |= XFS_ILOG_CORE;
5141 flags |= xfs_ilog_fext(whichfork);
5144 if ((error = xfs_btree_delete(cur, &i)))
5146 if (XFS_IS_CORRUPT(mp, i != 1)) {
5147 error = -EFSCORRUPTED;
5151 case BMAP_LEFT_FILLING:
5153 * Deleting the first part of the extent.
5155 got.br_startoff = del_endoff;
5156 got.br_startblock = del_endblock;
5157 got.br_blockcount -= del->br_blockcount;
5158 xfs_iext_update_extent(ip, state, icur, &got);
5160 flags |= xfs_ilog_fext(whichfork);
5163 error = xfs_bmbt_update(cur, &got);
5167 case BMAP_RIGHT_FILLING:
5169 * Deleting the last part of the extent.
5171 got.br_blockcount -= del->br_blockcount;
5172 xfs_iext_update_extent(ip, state, icur, &got);
5174 flags |= xfs_ilog_fext(whichfork);
5177 error = xfs_bmbt_update(cur, &got);
5183 * Deleting the middle of the extent.
5187 got.br_blockcount = del->br_startoff - got.br_startoff;
5188 xfs_iext_update_extent(ip, state, icur, &got);
5190 new.br_startoff = del_endoff;
5191 new.br_blockcount = got_endoff - del_endoff;
5192 new.br_state = got.br_state;
5193 new.br_startblock = del_endblock;
5195 flags |= XFS_ILOG_CORE;
5197 error = xfs_bmbt_update(cur, &got);
5200 error = xfs_btree_increment(cur, 0, &i);
5203 cur->bc_rec.b = new;
5204 error = xfs_btree_insert(cur, &i);
5205 if (error && error != -ENOSPC)
5208 * If we get no-space back from the btree insert, it tried a
5209 * split, and we have a zero block reservation. Fix up
5210 * our state and return the error.
5212 if (error == -ENOSPC) {
5214 * Reset the cursor, don't trust it after any insert operation.
5217 error = xfs_bmbt_lookup_eq(cur, &got, &i);
5220 if (XFS_IS_CORRUPT(mp, i != 1)) {
5221 error = -EFSCORRUPTED;
5225 * Update the btree record back
5226 * to the original value.
5228 error = xfs_bmbt_update(cur, &old);
5232 * Reset the extent record back
5233 * to the original value.
5235 xfs_iext_update_extent(ip, state, icur, &old);
5240 if (XFS_IS_CORRUPT(mp, i != 1)) {
5241 error = -EFSCORRUPTED;
5245 flags |= xfs_ilog_fext(whichfork);
5246 XFS_IFORK_NEXT_SET(ip, whichfork,
5247 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
5248 xfs_iext_next(ifp, icur);
5249 xfs_iext_insert(ip, icur, &new, state);
5253 /* remove reverse mapping */
5254 xfs_rmap_unmap_extent(tp, ip, whichfork, del);
5257 * If we need to, add to list of extents to delete.
5259 if (do_fx && !(bflags & XFS_BMAPI_REMAP)) {
5260 if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) {
5261 xfs_refcount_decrease_extent(tp, del);
5263 __xfs_bmap_add_free(tp, del->br_startblock,
5264 del->br_blockcount, NULL,
5265 (bflags & XFS_BMAPI_NODISCARD) ||
5266 del->br_state == XFS_EXT_UNWRITTEN);
5271 * Adjust inode # blocks in the file.
5274 ip->i_d.di_nblocks -= nblks;
5276 * Adjust quota data.
5278 if (qfield && !(bflags & XFS_BMAPI_REMAP))
5279 xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);
5287 * Unmap (remove) blocks from a file.
5288 * If nexts is nonzero then the number of extents to remove is limited to
5289 * that value. If not all extents in the block range can be removed then *rlen reports the length that remains.
5294 struct xfs_trans *tp, /* transaction pointer */
5295 struct xfs_inode *ip, /* incore inode */
5296 xfs_fileoff_t start, /* first file offset deleted */
5297 xfs_filblks_t *rlen, /* i/o: amount remaining */
5298 int flags, /* misc flags */
5299 xfs_extnum_t nexts) /* number of extents max */
5301 struct xfs_btree_cur *cur; /* bmap btree cursor */
5302 struct xfs_bmbt_irec del; /* extent being deleted */
5303 int error; /* error return value */
5304 xfs_extnum_t extno; /* extent number in list */
5305 struct xfs_bmbt_irec got; /* current extent record */
5306 struct xfs_ifork *ifp; /* inode fork pointer */
5307 int isrt; /* freeing in rt area */
5308 int logflags; /* transaction logging flags */
5309 xfs_extlen_t mod; /* rt extent offset */
5310 struct xfs_mount *mp = ip->i_mount;
5311 int tmp_logflags; /* partial logging flags */
5312 int wasdel; /* was a delayed alloc extent */
5313 int whichfork; /* data or attribute fork */
5315 xfs_filblks_t len = *rlen; /* length to unmap in file */
5316 xfs_fileoff_t max_len;
5317 xfs_agnumber_t prev_agno = NULLAGNUMBER, agno;
5319 struct xfs_iext_cursor icur;
5322 trace_xfs_bunmap(ip, start, len, flags, _RET_IP_);
5324 whichfork = xfs_bmapi_whichfork(flags);
5325 ASSERT(whichfork != XFS_COW_FORK);
5326 ifp = XFS_IFORK_PTR(ip, whichfork);
5327 if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ip, whichfork)))
5328 return -EFSCORRUPTED;
5329 if (XFS_FORCED_SHUTDOWN(mp))
5332 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
5337 * Guesstimate how many blocks we can unmap without running the risk of
5338 * blowing out the transaction with a mix of EFIs and reflink updates.
5341 if (tp && xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK)
5342 max_len = min(len, xfs_refcount_max_unmap(tp->t_log_res));
5346 if (!(ifp->if_flags & XFS_IFEXTENTS) &&
5347 (error = xfs_iread_extents(tp, ip, whichfork)))
5349 if (xfs_iext_count(ifp) == 0) {
5353 XFS_STATS_INC(mp, xs_blk_unmap);
5354 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
5357 if (!xfs_iext_lookup_extent_before(ip, ifp, &end, &icur, &got)) {
5364 if (ifp->if_flags & XFS_IFBROOT) {
5365 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
5366 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5367 cur->bc_private.b.flags = 0;
5373 * Synchronize by locking the bitmap inode.
5375 xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
5376 xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL);
5377 xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
5378 xfs_trans_ijoin(tp, mp->m_rsumip, XFS_ILOCK_EXCL);
5382 while (end != (xfs_fileoff_t)-1 && end >= start &&
5383 (nexts == 0 || extno < nexts) && max_len > 0) {
5385 * Is the found extent after a hole in which end lives?
5386 * Just back up to the previous extent, if so.
5388 if (got.br_startoff > end &&
5389 !xfs_iext_prev_extent(ifp, &icur, &got)) {
5394 * Is the last block of this extent before the range
5395 * we're supposed to delete? If so, we're done.
5397 end = XFS_FILEOFF_MIN(end,
5398 got.br_startoff + got.br_blockcount - 1);
5402 * Then deal with the (possibly delayed) allocated space we found.
5406 wasdel = isnullstartblock(del.br_startblock);
5409 * Make sure we don't touch multiple AGF headers out of order
5410 * in a single transaction, as that could cause AB-BA deadlocks.
5412 if (!wasdel && !isrt) {
5413 agno = XFS_FSB_TO_AGNO(mp, del.br_startblock);
5414 if (prev_agno != NULLAGNUMBER && prev_agno > agno)
5418 if (got.br_startoff < start) {
5419 del.br_startoff = start;
5420 del.br_blockcount -= start - got.br_startoff;
5422 del.br_startblock += start - got.br_startoff;
5424 if (del.br_startoff + del.br_blockcount > end + 1)
5425 del.br_blockcount = end + 1 - del.br_startoff;
5427 /* How much can we safely unmap? */
5428 if (max_len < del.br_blockcount) {
5429 del.br_startoff += del.br_blockcount - max_len;
5431 del.br_startblock += del.br_blockcount - max_len;
5432 del.br_blockcount = max_len;
5438 sum = del.br_startblock + del.br_blockcount;
5439 div_u64_rem(sum, mp->m_sb.sb_rextsize, &mod);
5442 * Realtime extent not lined up at the end.
5443 * The extent could have been split into written
5444 * and unwritten pieces, or we could just be
5445 * unmapping part of it. But we can't really
5446 * get rid of part of a realtime extent.
5448 if (del.br_state == XFS_EXT_UNWRITTEN) {
5450 * This piece is unwritten, or we're not
5451 * using unwritten extents. Skip over it.
5454 end -= mod > del.br_blockcount ?
5455 del.br_blockcount : mod;
5456 if (end < got.br_startoff &&
5457 !xfs_iext_prev_extent(ifp, &icur, &got)) {
5464 * It's written, turn it unwritten.
5465 * This is better than zeroing it.
5467 ASSERT(del.br_state == XFS_EXT_NORM);
5468 ASSERT(tp->t_blk_res > 0);
5470 * If this spans a realtime extent boundary,
5471 * chop it back to the start of the one we end at.
5473 if (del.br_blockcount > mod) {
5474 del.br_startoff += del.br_blockcount - mod;
5475 del.br_startblock += del.br_blockcount - mod;
5476 del.br_blockcount = mod;
5478 del.br_state = XFS_EXT_UNWRITTEN;
5479 error = xfs_bmap_add_extent_unwritten_real(tp, ip,
5480 whichfork, &icur, &cur, &del,
5486 div_u64_rem(del.br_startblock, mp->m_sb.sb_rextsize, &mod);
5488 xfs_extlen_t off = mp->m_sb.sb_rextsize - mod;
5491 * Realtime extent is lined up at the end but not
5492 * at the front. We'll get rid of full extents if we can.
5495 if (del.br_blockcount > off) {
5496 del.br_blockcount -= off;
5497 del.br_startoff += off;
5498 del.br_startblock += off;
5499 } else if (del.br_startoff == start &&
5500 (del.br_state == XFS_EXT_UNWRITTEN ||
5501 tp->t_blk_res == 0)) {
5503 * Can't make it unwritten. There isn't
5504 * a full extent here so just skip it.
5506 ASSERT(end >= del.br_blockcount);
5507 end -= del.br_blockcount;
5508 if (got.br_startoff > end &&
5509 !xfs_iext_prev_extent(ifp, &icur, &got)) {
5514 } else if (del.br_state == XFS_EXT_UNWRITTEN) {
5515 struct xfs_bmbt_irec prev;
5516 xfs_fileoff_t unwrite_start;
5519 * This one is already unwritten.
5520 * It must have a written left neighbor.
5521 * Unwrite the killed part of that one and try again.
5524 if (!xfs_iext_prev_extent(ifp, &icur, &prev))
5526 ASSERT(prev.br_state == XFS_EXT_NORM);
5527 ASSERT(!isnullstartblock(prev.br_startblock));
5528 ASSERT(del.br_startblock ==
5529 prev.br_startblock + prev.br_blockcount);
5530 unwrite_start = max3(start,
5531 del.br_startoff - mod,
5533 mod = unwrite_start - prev.br_startoff;
5534 prev.br_startoff = unwrite_start;
5535 prev.br_startblock += mod;
5536 prev.br_blockcount -= mod;
5537 prev.br_state = XFS_EXT_UNWRITTEN;
5538 error = xfs_bmap_add_extent_unwritten_real(tp,
5539 ip, whichfork, &icur, &cur,
5545 ASSERT(del.br_state == XFS_EXT_NORM);
5546 del.br_state = XFS_EXT_UNWRITTEN;
5547 error = xfs_bmap_add_extent_unwritten_real(tp,
5548 ip, whichfork, &icur, &cur,
5558 error = xfs_bmap_del_extent_delay(ip, whichfork, &icur,
5561 error = xfs_bmap_del_extent_real(ip, tp, &icur, cur,
5562 &del, &tmp_logflags, whichfork,
5564 logflags |= tmp_logflags;
5570 max_len -= del.br_blockcount;
5571 end = del.br_startoff - 1;
5574 * If not done go on to the next (previous) record.
5576 if (end != (xfs_fileoff_t)-1 && end >= start) {
5577 if (!xfs_iext_get_extent(ifp, &icur, &got) ||
5578 (got.br_startoff > end &&
5579 !xfs_iext_prev_extent(ifp, &icur, &got))) {
5586 if (done || end == (xfs_fileoff_t)-1 || end < start)
5589 *rlen = end - start + 1;
5592 * Convert to a btree if necessary.
5594 if (xfs_bmap_needs_btree(ip, whichfork)) {
5595 ASSERT(cur == NULL);
5596 error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
5597 &tmp_logflags, whichfork);
5598 logflags |= tmp_logflags;
5600 error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags,
5606 * Log everything. Do this after conversion, there's no point in
5607 * logging the extent records if we've converted to btree format.
5609 if ((logflags & xfs_ilog_fext(whichfork)) &&
5610 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
5611 logflags &= ~xfs_ilog_fext(whichfork);
5612 else if ((logflags & xfs_ilog_fbroot(whichfork)) &&
5613 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
5614 logflags &= ~xfs_ilog_fbroot(whichfork);
5616 * Log inode even in the error case, if the transaction
5617 * is dirty we'll need to shut down the filesystem.
5620 xfs_trans_log_inode(tp, ip, logflags);
5623 cur->bc_private.b.allocated = 0;
5624 xfs_btree_del_cursor(cur, error);
5629 /* Unmap a range of a file. */
5633 struct xfs_inode *ip,
5642 error = __xfs_bunmapi(tp, ip, bno, &len, flags, nexts);
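
/*
 * Illustrative sketch, not part of the original file: removing a range of
 * blocks with xfs_bunmapi(). The helper name is hypothetical. Real callers
 * such as xfs_itruncate_extents_flags() limit each call to a couple of
 * extents and roll the transaction between iterations; that rolling is
 * elided here for brevity.
 */
static int __maybe_unused
xfs_example_unmap_range(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		bno,
	xfs_filblks_t		len)
{
	int			done = 0;
	int			error = 0;

	while (!done) {
		/* Unmap at most two extents per call, back to front. */
		error = xfs_bunmapi(tp, ip, bno, len, 0, 2, &done);
		if (error)
			break;
	}
	return error;
}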
5648 * Determine whether an extent shift can be accomplished by a merge with the
5649 * extent that precedes the target hole of the shift.
5653 struct xfs_bmbt_irec *left, /* preceding extent */
5654 struct xfs_bmbt_irec *got, /* current extent to shift */
5655 xfs_fileoff_t shift) /* shift fsb */
5657 xfs_fileoff_t startoff;
5659 startoff = got->br_startoff - shift;
5662 * The extent, once shifted, must be adjacent in-file and on-disk with
5663 * the preceding extent.
5665 if ((left->br_startoff + left->br_blockcount != startoff) ||
5666 (left->br_startblock + left->br_blockcount != got->br_startblock) ||
5667 (left->br_state != got->br_state) ||
5668 (left->br_blockcount + got->br_blockcount > MAXEXTLEN))
5675 * A bmap extent shift adjusts the file offset of an extent to fill a preceding
5676 * hole in the file. If an extent shift would result in the extent being fully
5677 * adjacent to the extent that currently precedes the hole, we can merge with
5678 * the preceding extent rather than do the shift.
5680 * This function assumes the caller has verified a shift-by-merge is possible
5681 * with the provided extents via xfs_bmse_can_merge().
5685 struct xfs_trans *tp,
5686 struct xfs_inode *ip,
5688 xfs_fileoff_t shift, /* shift fsb */
5689 struct xfs_iext_cursor *icur,
5690 struct xfs_bmbt_irec *got, /* extent to shift */
5691 struct xfs_bmbt_irec *left, /* preceding extent */
5692 struct xfs_btree_cur *cur,
5693 int *logflags) /* output */
5695 struct xfs_bmbt_irec new;
5696 xfs_filblks_t blockcount;
5698 struct xfs_mount *mp = ip->i_mount;
5700 blockcount = left->br_blockcount + got->br_blockcount;
5702 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
5703 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
5704 ASSERT(xfs_bmse_can_merge(left, got, shift));
5707 new.br_blockcount = blockcount;
5710 * Update the on-disk extent count, the btree if necessary and log the inode.
5713 XFS_IFORK_NEXT_SET(ip, whichfork,
5714 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
5715 *logflags |= XFS_ILOG_CORE;
5717 *logflags |= XFS_ILOG_DEXT;
5721 /* lookup and remove the extent to merge */
5722 error = xfs_bmbt_lookup_eq(cur, got, &i);
5725 if (XFS_IS_CORRUPT(mp, i != 1))
5726 return -EFSCORRUPTED;
5728 error = xfs_btree_delete(cur, &i);
5731 if (XFS_IS_CORRUPT(mp, i != 1))
5732 return -EFSCORRUPTED;
5734 /* lookup and update size of the previous extent */
5735 error = xfs_bmbt_lookup_eq(cur, left, &i);
5738 if (XFS_IS_CORRUPT(mp, i != 1))
5739 return -EFSCORRUPTED;
5741 error = xfs_bmbt_update(cur, &new);
5745 /* change to extent format if required after extent removal */
5746 error = xfs_bmap_btree_to_extents(tp, ip, cur, logflags, whichfork);
5751 xfs_iext_remove(ip, icur, 0);
5752 xfs_iext_prev(XFS_IFORK_PTR(ip, whichfork), icur);
5753 xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
5756 /* update reverse mapping. rmap functions merge the rmaps for us */
5757 xfs_rmap_unmap_extent(tp, ip, whichfork, got);
5758 memcpy(&new, got, sizeof(new));
5759 new.br_startoff = left->br_startoff + left->br_blockcount;
5760 xfs_rmap_map_extent(tp, ip, whichfork, &new);
5765 xfs_bmap_shift_update_extent(
5766 struct xfs_trans *tp,
5767 struct xfs_inode *ip,
5769 struct xfs_iext_cursor *icur,
5770 struct xfs_bmbt_irec *got,
5771 struct xfs_btree_cur *cur,
5773 xfs_fileoff_t startoff)
5775 struct xfs_mount *mp = ip->i_mount;
5776 struct xfs_bmbt_irec prev = *got;
5779 *logflags |= XFS_ILOG_CORE;
5781 got->br_startoff = startoff;
5784 error = xfs_bmbt_lookup_eq(cur, &prev, &i);
5787 if (XFS_IS_CORRUPT(mp, i != 1))
5788 return -EFSCORRUPTED;
5790 error = xfs_bmbt_update(cur, got);
5794 *logflags |= XFS_ILOG_DEXT;
5797 xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
5800 /* update reverse mapping */
5801 xfs_rmap_unmap_extent(tp, ip, whichfork, &prev);
5802 xfs_rmap_map_extent(tp, ip, whichfork, got);
5807 xfs_bmap_collapse_extents(
5808 struct xfs_trans *tp,
5809 struct xfs_inode *ip,
5810 xfs_fileoff_t *next_fsb,
5811 xfs_fileoff_t offset_shift_fsb,
5814 int whichfork = XFS_DATA_FORK;
5815 struct xfs_mount *mp = ip->i_mount;
5816 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
5817 struct xfs_btree_cur *cur = NULL;
5818 struct xfs_bmbt_irec got, prev;
5819 struct xfs_iext_cursor icur;
5820 xfs_fileoff_t new_startoff;
5824 if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ip, whichfork)) ||
5825 XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
5826 return -EFSCORRUPTED;
5829 if (XFS_FORCED_SHUTDOWN(mp))
5832 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL));
5834 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
5835 error = xfs_iread_extents(tp, ip, whichfork);
5840 if (ifp->if_flags & XFS_IFBROOT) {
5841 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5842 cur->bc_private.b.flags = 0;
5845 if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
5849 if (XFS_IS_CORRUPT(mp, isnullstartblock(got.br_startblock))) {
5850 error = -EFSCORRUPTED;
5854 new_startoff = got.br_startoff - offset_shift_fsb;
5855 if (xfs_iext_peek_prev_extent(ifp, &icur, &prev)) {
5856 if (new_startoff < prev.br_startoff + prev.br_blockcount) {
5861 if (xfs_bmse_can_merge(&prev, &got, offset_shift_fsb)) {
5862 error = xfs_bmse_merge(tp, ip, whichfork,
5863 offset_shift_fsb, &icur, &got, &prev,
5870 if (got.br_startoff < offset_shift_fsb) {
5876 error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
5877 cur, &logflags, new_startoff);
5882 if (!xfs_iext_next_extent(ifp, &icur, &got)) {
5887 *next_fsb = got.br_startoff;
5890 xfs_btree_del_cursor(cur, error);
5892 xfs_trans_log_inode(tp, ip, logflags);
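
/*
 * Illustrative sketch, not part of the original file: walking a file with
 * xfs_bmap_collapse_extents() until every extent past @start_fsb has been
 * shifted left by @shift_fsb. Hypothetical helper; the real driver,
 * xfs_collapse_file_space(), allocates and rolls a transaction around each
 * iteration, which is elided here.
 */
static int __maybe_unused
xfs_example_collapse(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		start_fsb,
	xfs_fileoff_t		shift_fsb)
{
	xfs_fileoff_t		next_fsb = start_fsb + shift_fsb;
	bool			done = false;
	int			error = 0;

	while (!done) {
		error = xfs_bmap_collapse_extents(tp, ip, &next_fsb,
				shift_fsb, &done);
		if (error)
			break;
	}
	return error;
}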
5896 /* Make sure we won't be right-shifting an extent past the maximum bound. */
5898 xfs_bmap_can_insert_extents(
5899 struct xfs_inode *ip,
5901 xfs_fileoff_t shift)
5903 struct xfs_bmbt_irec got;
5907 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
5909 if (XFS_FORCED_SHUTDOWN(ip->i_mount))
5912 xfs_ilock(ip, XFS_ILOCK_EXCL);
5913 error = xfs_bmap_last_extent(NULL, ip, XFS_DATA_FORK, &got, &is_empty);
5914 if (!error && !is_empty && got.br_startoff >= off &&
5915 ((got.br_startoff + shift) & BMBT_STARTOFF_MASK) < got.br_startoff)
5917 xfs_iunlock(ip, XFS_ILOCK_EXCL);
5923 xfs_bmap_insert_extents(
5924 struct xfs_trans *tp,
5925 struct xfs_inode *ip,
5926 xfs_fileoff_t *next_fsb,
5927 xfs_fileoff_t offset_shift_fsb,
5929 xfs_fileoff_t stop_fsb)
5931 int whichfork = XFS_DATA_FORK;
5932 struct xfs_mount *mp = ip->i_mount;
5933 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
5934 struct xfs_btree_cur *cur = NULL;
5935 struct xfs_bmbt_irec got, next;
5936 struct xfs_iext_cursor icur;
5937 xfs_fileoff_t new_startoff;
5941 if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ip, whichfork)) ||
5942 XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
5943 return -EFSCORRUPTED;
5946 if (XFS_FORCED_SHUTDOWN(mp))
5949 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL));
5951 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
5952 error = xfs_iread_extents(tp, ip, whichfork);
5957 if (ifp->if_flags & XFS_IFBROOT) {
5958 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5959 cur->bc_private.b.flags = 0;
5962 if (*next_fsb == NULLFSBLOCK) {
5963 xfs_iext_last(ifp, &icur);
5964 if (!xfs_iext_get_extent(ifp, &icur, &got) ||
5965 stop_fsb > got.br_startoff) {
5970 if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
5975 if (XFS_IS_CORRUPT(mp, isnullstartblock(got.br_startblock))) {
5976 error = -EFSCORRUPTED;
5980 if (XFS_IS_CORRUPT(mp, stop_fsb > got.br_startoff)) {
5981 error = -EFSCORRUPTED;
5985 new_startoff = got.br_startoff + offset_shift_fsb;
5986 if (xfs_iext_peek_next_extent(ifp, &icur, &next)) {
5987 if (new_startoff + got.br_blockcount > next.br_startoff) {
5993 * Unlike a left shift (which involves a hole punch), a right
5994 * shift does not modify extent neighbors in any way. We should
5995 * never find mergeable extents in this scenario. Check anyway
5996 * and warn if we encounter two extents that could be one.
5998 if (xfs_bmse_can_merge(&got, &next, offset_shift_fsb))
6002 error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
6003 cur, &logflags, new_startoff);
6007 if (!xfs_iext_prev_extent(ifp, &icur, &got) ||
6008 stop_fsb >= got.br_startoff + got.br_blockcount) {
6013 *next_fsb = got.br_startoff;
6016 xfs_btree_del_cursor(cur, error);
6018 xfs_trans_log_inode(tp, ip, logflags);
6023 * Splits an extent into two extents at the split_fsb block, such that
6024 * split_fsb becomes the first block of the second, new extent.
6025 * If split_fsb lies in a hole or at the first block of an extent,
6026 * there is nothing to split and we just return 0.
6029 xfs_bmap_split_extent_at(
6030 struct xfs_trans *tp,
6031 struct xfs_inode *ip,
6032 xfs_fileoff_t split_fsb)
6034 int whichfork = XFS_DATA_FORK;
6035 struct xfs_btree_cur *cur = NULL;
6036 struct xfs_bmbt_irec got;
6037 struct xfs_bmbt_irec new; /* split extent */
6038 struct xfs_mount *mp = ip->i_mount;
6039 struct xfs_ifork *ifp;
6040 xfs_fsblock_t gotblkcnt; /* new block count for got */
6041 struct xfs_iext_cursor icur;
6046 if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ip, whichfork)) ||
6047 XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
6048 return -EFSCORRUPTED;
6051 if (XFS_FORCED_SHUTDOWN(mp))
6054 ifp = XFS_IFORK_PTR(ip, whichfork);
6055 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
6056 /* Read in all the extents */
6057 error = xfs_iread_extents(tp, ip, whichfork);
6063 * If there are no extents, or split_fsb lies in a hole, we are done.
6065 if (!xfs_iext_lookup_extent(ip, ifp, split_fsb, &icur, &got) ||
6066 got.br_startoff >= split_fsb)
6069 gotblkcnt = split_fsb - got.br_startoff;
6070 new.br_startoff = split_fsb;
6071 new.br_startblock = got.br_startblock + gotblkcnt;
6072 new.br_blockcount = got.br_blockcount - gotblkcnt;
6073 new.br_state = got.br_state;
6075 if (ifp->if_flags & XFS_IFBROOT) {
6076 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
6077 cur->bc_private.b.flags = 0;
6078 error = xfs_bmbt_lookup_eq(cur, &got, &i);
6081 if (XFS_IS_CORRUPT(mp, i != 1)) {
6082 error = -EFSCORRUPTED;
6087 got.br_blockcount = gotblkcnt;
6088 xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), &icur,
6091 logflags = XFS_ILOG_CORE;
6093 error = xfs_bmbt_update(cur, &got);
6097 logflags |= XFS_ILOG_DEXT;
6099 /* Add new extent */
6100 xfs_iext_next(ifp, &icur);
6101 xfs_iext_insert(ip, &icur, &new, 0);
6102 XFS_IFORK_NEXT_SET(ip, whichfork,
6103 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
6106 error = xfs_bmbt_lookup_eq(cur, &new, &i);
6109 if (XFS_IS_CORRUPT(mp, i != 0)) {
6110 error = -EFSCORRUPTED;
6113 error = xfs_btree_insert(cur, &i);
6116 if (XFS_IS_CORRUPT(mp, i != 1)) {
6117 error = -EFSCORRUPTED;
6123 * Convert to a btree if necessary.
6125 if (xfs_bmap_needs_btree(ip, whichfork)) {
6126 int tmp_logflags; /* partial log flag return val */
6128 ASSERT(cur == NULL);
6129 error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
6130 &tmp_logflags, whichfork);
6131 logflags |= tmp_logflags;
6136 cur->bc_private.b.allocated = 0;
6137 xfs_btree_del_cursor(cur, error);
6141 xfs_trans_log_inode(tp, ip, logflags);
6146 xfs_bmap_split_extent(
6147 struct xfs_inode *ip,
6148 xfs_fileoff_t split_fsb)
6150 struct xfs_mount *mp = ip->i_mount;
6151 struct xfs_trans *tp;
6154 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
6155 XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
6159 xfs_ilock(ip, XFS_ILOCK_EXCL);
6160 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
6162 error = xfs_bmap_split_extent_at(tp, ip, split_fsb);
6166 return xfs_trans_commit(tp);
6169 xfs_trans_cancel(tp);
6173 /* Deferred mapping is only for real extents in the data fork. */
6175 xfs_bmap_is_update_needed(
6176 struct xfs_bmbt_irec *bmap)
6178 return bmap->br_startblock != HOLESTARTBLOCK &&
6179 bmap->br_startblock != DELAYSTARTBLOCK;
6182 /* Record a bmap intent. */
6185 struct xfs_trans *tp,
6186 enum xfs_bmap_intent_type type,
6187 struct xfs_inode *ip,
6189 struct xfs_bmbt_irec *bmap)
6191 struct xfs_bmap_intent *bi;
6193 trace_xfs_bmap_defer(tp->t_mountp,
6194 XFS_FSB_TO_AGNO(tp->t_mountp, bmap->br_startblock),
6196 XFS_FSB_TO_AGBNO(tp->t_mountp, bmap->br_startblock),
6197 ip->i_ino, whichfork,
6199 bmap->br_blockcount,
6202 bi = kmem_alloc(sizeof(struct xfs_bmap_intent), KM_NOFS);
6203 INIT_LIST_HEAD(&bi->bi_list);
6206 bi->bi_whichfork = whichfork;
6207 bi->bi_bmap = *bmap;
6209 xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_BMAP, &bi->bi_list);
6213 /* Map an extent into a file. */
6215 xfs_bmap_map_extent(
6216 struct xfs_trans *tp,
6217 struct xfs_inode *ip,
6218 struct xfs_bmbt_irec *PREV)
6220 if (!xfs_bmap_is_update_needed(PREV))
6223 __xfs_bmap_add(tp, XFS_BMAP_MAP, ip, XFS_DATA_FORK, PREV);
6226 /* Unmap an extent out of a file. */
6228 xfs_bmap_unmap_extent(
6229 struct xfs_trans *tp,
6230 struct xfs_inode *ip,
6231 struct xfs_bmbt_irec *PREV)
6233 if (!xfs_bmap_is_update_needed(PREV))
6236 __xfs_bmap_add(tp, XFS_BMAP_UNMAP, ip, XFS_DATA_FORK, PREV);
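
/*
 * Illustrative sketch, not part of the original file: queueing a deferred
 * mapping of a physical extent into the data fork. The helper name is
 * hypothetical. The intent is logged now; xfs_bmap_finish_one() is invoked
 * later by the deferred-ops machinery as the transaction chain commits.
 */
static void __maybe_unused
xfs_example_defer_map(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		off,
	xfs_fsblock_t		pblk,
	xfs_filblks_t		len)
{
	struct xfs_bmbt_irec	irec = {
		.br_startoff	= off,
		.br_startblock	= pblk,
		.br_blockcount	= len,
		.br_state	= XFS_EXT_NORM,
	};

	xfs_bmap_map_extent(tp, ip, &irec);
}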
6240 * Process one of the deferred bmap operations. We pass back the
6241 * btree cursor to maintain our lock on the bmapbt between calls.
6244 xfs_bmap_finish_one(
6245 struct xfs_trans *tp,
6246 struct xfs_inode *ip,
6247 enum xfs_bmap_intent_type type,
6249 xfs_fileoff_t startoff,
6250 xfs_fsblock_t startblock,
6251 xfs_filblks_t *blockcount,
6256 ASSERT(tp->t_firstblock == NULLFSBLOCK);
6258 trace_xfs_bmap_deferred(tp->t_mountp,
6259 XFS_FSB_TO_AGNO(tp->t_mountp, startblock), type,
6260 XFS_FSB_TO_AGBNO(tp->t_mountp, startblock),
6261 ip->i_ino, whichfork, startoff, *blockcount, state);
6263 if (WARN_ON_ONCE(whichfork != XFS_DATA_FORK))
6264 return -EFSCORRUPTED;
6266 if (XFS_TEST_ERROR(false, tp->t_mountp,
6267 XFS_ERRTAG_BMAP_FINISH_ONE))
6272 error = xfs_bmapi_remap(tp, ip, startoff, *blockcount,
6276 case XFS_BMAP_UNMAP:
6277 error = __xfs_bunmapi(tp, ip, startoff, blockcount,
6278 XFS_BMAPI_REMAP, 1);
6282 error = -EFSCORRUPTED;
6288 /* Check that an inode's extent does not have invalid flags or bad ranges. */
6290 xfs_bmap_validate_extent(
6291 struct xfs_inode *ip,
6293 struct xfs_bmbt_irec *irec)
6295 struct xfs_mount *mp = ip->i_mount;
6296 xfs_fsblock_t endfsb;
6299 isrt = XFS_IS_REALTIME_INODE(ip);
6300 endfsb = irec->br_startblock + irec->br_blockcount - 1;
6302 if (!xfs_verify_rtbno(mp, irec->br_startblock))
6303 return __this_address;
6304 if (!xfs_verify_rtbno(mp, endfsb))
6305 return __this_address;
6307 if (!xfs_verify_fsbno(mp, irec->br_startblock))
6308 return __this_address;
6309 if (!xfs_verify_fsbno(mp, endfsb))
6310 return __this_address;
6311 if (XFS_FSB_TO_AGNO(mp, irec->br_startblock) !=
6312 XFS_FSB_TO_AGNO(mp, endfsb))
6313 return __this_address;
6315 if (irec->br_state != XFS_EXT_NORM && whichfork != XFS_DATA_FORK)
6316 return __this_address;
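
/*
 * Illustrative sketch, not part of the original file: how a verifier-style
 * caller might use xfs_bmap_validate_extent(), treating a non-NULL failure
 * address as corruption. The helper name is hypothetical.
 */
static int __maybe_unused
xfs_example_validate_extent(
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*irec)
{
	xfs_failaddr_t		fa;

	fa = xfs_bmap_validate_extent(ip, whichfork, irec);
	if (fa) {
		xfs_alert(ip->i_mount, "bad extent, failed at %pS", fa);
		return -EFSCORRUPTED;
	}
	return 0;
}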