/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_dir2.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_extfree_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_buf_item.h"
#include "xfs_trace.h"
#include "xfs_symlink.h"
#include "xfs_attr_leaf.h"
#include "xfs_filestream.h"
#include "xfs_rmap.h"
#include "xfs_ag_resv.h"
#include "xfs_refcount.h"
#include "xfs_rmap_btree.h"
#include "xfs_icache.h"

kmem_zone_t		*xfs_bmap_free_item_zone;

/*
 * Miscellaneous helper functions
 */

/*
 * Compute and fill in the value of the maximum depth of a bmap btree
 * in this filesystem.  Done once, during mount.
 */
void
xfs_bmap_compute_maxlevels(
	xfs_mount_t	*mp,		/* file system mount structure */
	int		whichfork)	/* data or attr fork */
{
	int		level;		/* btree level */
	uint		maxblocks;	/* max blocks at this level */
	uint		maxleafents;	/* max leaf entries possible */
	int		maxrootrecs;	/* max records in root block */
	int		minleafrecs;	/* min records in leaf block */
	int		minnoderecs;	/* min records in node block */
	int		sz;		/* root block size */

	/*
	 * The maximum number of extents in a file, hence the maximum
	 * number of leaf entries, is controlled by the type of di_nextents
	 * (a signed 32-bit number, xfs_extnum_t), or by di_anextents
	 * (a signed 16-bit number, xfs_aextnum_t).
	 *
	 * Note that we can no longer assume that if we are in ATTR1 that
	 * the fork offset of all the inodes will be
	 * (xfs_default_attroffset(ip) >> 3) because we could have mounted
	 * with ATTR2 and then mounted back with ATTR1, keeping the
	 * di_forkoff's fixed but probably at various positions. Therefore,
	 * for both ATTR1 and ATTR2 we have to assume the worst case scenario
	 * of a minimum size available.
	 */
	if (whichfork == XFS_DATA_FORK) {
		maxleafents = MAXEXTNUM;
		sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
	} else {
		maxleafents = MAXAEXTNUM;
		sz = XFS_BMDR_SPACE_CALC(MINABTPTRS);
	}
	maxrootrecs = xfs_bmdr_maxrecs(sz, 0);
	minleafrecs = mp->m_bmap_dmnr[0];
	minnoderecs = mp->m_bmap_dmnr[1];
	maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
	for (level = 1; maxblocks > 1; level++) {
		if (maxblocks <= maxrootrecs)
			maxblocks = 1;
		else
			maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
	}
	mp->m_bm_maxlevels[whichfork] = level;
}
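/*
 * Illustrative walk-through of the loop above (a sketch only; the real
 * numbers depend on block size and fork geometry): with 4k blocks a
 * half-full bmbt leaf holds on the order of 125 records, so MAXEXTNUM
 * (~2^31) leaf entries need roughly 1.7e7 leaves; dividing by the node
 * fanout each pass gives ~133k, ~1050, then ~9 blocks, which fits in
 * the inode root, terminating with m_bm_maxlevels[XFS_DATA_FORK] == 5
 * on such a filesystem.
 */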
STATIC int				/* error */
xfs_bmbt_lookup_eq(
	struct xfs_btree_cur	*cur,
	xfs_fileoff_t		off,
	xfs_fsblock_t		bno,
	xfs_filblks_t		len,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b.br_startoff = off;
	cur->bc_rec.b.br_startblock = bno;
	cur->bc_rec.b.br_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}
STATIC int				/* error */
xfs_bmbt_lookup_ge(
	struct xfs_btree_cur	*cur,
	xfs_fileoff_t		off,
	xfs_fsblock_t		bno,
	xfs_filblks_t		len,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b.br_startoff = off;
	cur->bc_rec.b.br_startblock = bno;
	cur->bc_rec.b.br_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
}
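/*
 * Both lookup helpers follow the same pattern: prime cur->bc_rec.b with
 * the key being sought, then let the generic btree code do the walk.
 * On return, *stat is 1 if a matching record (or, for _ge, the first
 * record at or after the key) was found and 0 otherwise; callers in
 * this file pair that with XFS_WANT_CORRUPTED_GOTO() to catch results
 * that should be impossible.
 */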
/*
 * Check if the inode needs to be converted to btree format.
 */
static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
{
	return whichfork != XFS_COW_FORK &&
		XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
		XFS_IFORK_NEXTENTS(ip, whichfork) >
			XFS_IFORK_MAXEXT(ip, whichfork);
}
/*
 * Check if the inode should be converted to extent format.
 */
static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
{
	return whichfork != XFS_COW_FORK &&
		XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
		XFS_IFORK_NEXTENTS(ip, whichfork) <=
			XFS_IFORK_MAXEXT(ip, whichfork);
}
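/*
 * Note that the two predicates above use complementary comparisons
 * (strictly greater-than versus less-than-or-equal) against
 * XFS_IFORK_MAXEXT(), so for any given extent count at most one
 * conversion direction applies and a fork sitting exactly at the
 * boundary cannot ping-pong between formats.
 */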
/*
 * Update the record referred to by cur to the value given
 * by [off, bno, len, state].
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int
xfs_bmbt_update(
	struct xfs_btree_cur	*cur,
	xfs_fileoff_t		off,
	xfs_fsblock_t		bno,
	xfs_filblks_t		len,
	xfs_exntst_t		state)
{
	union xfs_btree_rec	rec;

	xfs_bmbt_disk_set_allf(&rec.bmbt, off, bno, len, state);
	return xfs_btree_update(cur, &rec);
}
/*
 * Compute the worst-case number of indirect blocks that will be used
 * for ip's delayed extent of length "len".
 */
STATIC xfs_filblks_t
xfs_bmap_worst_indlen(
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_filblks_t	len)		/* delayed extent length */
{
	int		level;		/* btree level number */
	int		maxrecs;	/* maximum record count at this level */
	xfs_mount_t	*mp;		/* mount structure */
	xfs_filblks_t	rval;		/* return value */
	xfs_filblks_t   orig_len;

	mp = ip->i_mount;

	/* Calculate the worst-case size of the bmbt. */
	orig_len = len;
	maxrecs = mp->m_bmap_dmxr[0];
	for (level = 0, rval = 0;
	     level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
	     level++) {
		len += maxrecs - 1;
		do_div(len, maxrecs);
		rval += len;
		if (len == 1) {
			rval += XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
				level - 1;
			break;
		}
		if (level == 0)
			maxrecs = mp->m_bmap_dmxr[1];
	}

	/* Calculate the worst-case size of the rmapbt. */
	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
		rval += 1 + xfs_rmapbt_calc_size(mp, orig_len) +
				mp->m_rmap_maxlevels;

	return rval;
}
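/*
 * Worked example of the bmbt term above (illustrative values,
 * assuming a fanout of 250 at every level and a 5-level maximum): a
 * 1024 block delalloc extent costs ceil(1024/250) = 5 leaf blocks,
 * then ceil(5/250) = 1 node block, at which point the remaining
 * 5 - 1 - 1 = 3 tree levels are charged one block each, for a worst
 * case of 9 indirect blocks.  The rmapbt term applies only on
 * filesystems with the reverse-mapping feature enabled.
 */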
/*
 * Calculate the default attribute fork offset for newly created inodes.
 */
uint
xfs_default_attroffset(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint			offset;

	if (mp->m_sb.sb_inodesize == 256) {
		offset = XFS_LITINO(mp, ip->i_d.di_version) -
				XFS_BMDR_SPACE_CALC(MINABTPTRS);
	} else {
		offset = XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
	}

	ASSERT(offset < XFS_LITINO(mp, ip->i_d.di_version));
	return offset;
}
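/*
 * Illustrative reading of the two branches (a sketch; exact byte
 * counts depend on inode size and di_version): 256-byte inodes have so
 * little literal area that the attr fork is pinned to the far end,
 * leaving only a minimal attr btree root; larger inodes instead carve
 * out room for roughly six minimal attr roots and leave everything
 * before the offset to the data fork.
 */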
/*
 * Helper routine to reset inode di_forkoff field when switching
 * attribute fork from local to extent format - we reset it where
 * possible to make space available for inline data fork extents.
 */
STATIC void
xfs_bmap_forkoff_reset(
	xfs_inode_t	*ip,
	int		whichfork)
{
	if (whichfork == XFS_ATTR_FORK &&
	    ip->i_d.di_format != XFS_DINODE_FMT_DEV &&
	    ip->i_d.di_format != XFS_DINODE_FMT_UUID &&
	    ip->i_d.di_format != XFS_DINODE_FMT_BTREE) {
		uint	dfl_forkoff = xfs_default_attroffset(ip) >> 3;

		if (dfl_forkoff > ip->i_d.di_forkoff)
			ip->i_d.di_forkoff = dfl_forkoff;
	}
}
#ifdef DEBUG
STATIC struct xfs_buf *
xfs_bmap_get_bp(
	struct xfs_btree_cur	*cur,
	xfs_fsblock_t		bno)
{
	struct xfs_log_item_desc *lidp;
	int			i;

	if (!cur)
		return NULL;

	for (i = 0; i < XFS_BTREE_MAXLEVELS; i++) {
		if (!cur->bc_bufs[i])
			break;
		if (XFS_BUF_ADDR(cur->bc_bufs[i]) == bno)
			return cur->bc_bufs[i];
	}

	/* Chase down all the log items to see if the bp is there */
	list_for_each_entry(lidp, &cur->bc_tp->t_items, lid_trans) {
		struct xfs_buf_log_item	*bip;
		bip = (struct xfs_buf_log_item *)lidp->lid_item;
		if (bip->bli_item.li_type == XFS_LI_BUF &&
		    XFS_BUF_ADDR(bip->bli_buf) == bno)
			return bip->bli_buf;
	}

	return NULL;
}
STATIC void
xfs_check_block(
	struct xfs_btree_block	*block,
	xfs_mount_t		*mp,
	int			root,
	short			sz)
{
	int			i, j, dmxr;
	__be64			*pp, *thispa;	/* pointer to block address */
	xfs_bmbt_key_t		*prevp, *keyp;

	ASSERT(be16_to_cpu(block->bb_level) > 0);

	prevp = NULL;
	for( i = 1; i <= xfs_btree_get_numrecs(block); i++) {
		dmxr = mp->m_bmap_dmxr[0];
		keyp = XFS_BMBT_KEY_ADDR(mp, block, i);

		if (prevp) {
			ASSERT(be64_to_cpu(prevp->br_startoff) <
			       be64_to_cpu(keyp->br_startoff));
		}
		prevp = keyp;

		/*
		 * Compare the block numbers to see if there are dups.
		 */
		if (root)
			pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz);
		else
			pp = XFS_BMBT_PTR_ADDR(mp, block, i, dmxr);

		for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) {
			if (root)
				thispa = XFS_BMAP_BROOT_PTR_ADDR(mp, block, j, sz);
			else
				thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr);
			if (*thispa == *pp) {
				xfs_warn(mp, "%s: thispa(%d) == pp(%d) %Ld",
					__func__, j, i,
					(unsigned long long)be64_to_cpu(*thispa));
				panic("%s: ptrs are equal in node\n",
					__func__);
			}
		}
	}
}
/*
 * Check that the extents for the inode ip are in the right order in all
 * btree leaves. This becomes prohibitively expensive for large extent count
 * files, so don't bother with inodes that have more than 10,000 extents in
 * them. The btree record ordering checks will still be done, so for such large
 * bmapbt constructs that is going to catch most corruptions.
 */
STATIC void
xfs_bmap_check_leaf_extents(
	xfs_btree_cur_t		*cur,	/* btree cursor or null */
	xfs_inode_t		*ip,		/* incore inode pointer */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_btree_block	*block;	/* current btree block */
	xfs_fsblock_t		bno;	/* block # of "block" */
	xfs_buf_t		*bp;	/* buffer for "block" */
	int			error;	/* error return value */
	xfs_extnum_t		i=0, j;	/* index into the extents list */
	xfs_ifork_t		*ifp;	/* fork structure */
	int			level;	/* btree level, for checking */
	xfs_mount_t		*mp;	/* file system mount structure */
	__be64			*pp;	/* pointer to block address */
	xfs_bmbt_rec_t		*ep;	/* pointer to current extent */
	xfs_bmbt_rec_t		last = {0, 0}; /* last extent in prev block */
	xfs_bmbt_rec_t		*nextp;	/* pointer to next extent */
	int			bp_release = 0;

	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) {
		return;
	}

	/* skip large extent count inodes */
	if (ip->i_d.di_nextents > 10000)
		return;

	bno = NULLFSBLOCK;
	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	block = ifp->if_broot;
	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);

	ASSERT(bno != NULLFSBLOCK);
	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

	/*
	 * Go down the tree until leaf level is reached, following the first
	 * pointer (leftmost) at each level.
	 */
	while (level-- > 0) {
		/* See if buf is in cur first */
		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
		if (level == 0)
			break;

		/*
		 * Check this block for basic sanity (increasing keys and
		 * no duplicate blocks).
		 */

		xfs_check_block(block, mp, 0, 0);
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		XFS_WANT_CORRUPTED_GOTO(mp,
					XFS_FSB_SANITY_CHECK(mp, bno), error0);
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
	}

	/*
	 * Here with bp and block set to the leftmost leaf node in the tree.
	 */
	i = 0;

	/*
	 * Loop over all leaf nodes checking that all extents are in the right order.
	 */
	for (;;) {
		xfs_fsblock_t	nextbno;
		xfs_extnum_t	num_recs;

		num_recs = xfs_btree_get_numrecs(block);

		/*
		 * Read-ahead the next leaf block, if any.
		 */
		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);

		/*
		 * Check all the extents to make sure they are OK.
		 * If we had a previous block, the last entry should
		 * conform with the first entry in this one.
		 */

		ep = XFS_BMBT_REC_ADDR(mp, block, 1);
		if (i) {
			ASSERT(xfs_bmbt_disk_get_startoff(&last) +
			       xfs_bmbt_disk_get_blockcount(&last) <=
			       xfs_bmbt_disk_get_startoff(ep));
		}
		for (j = 1; j < num_recs; j++) {
			nextp = XFS_BMBT_REC_ADDR(mp, block, j + 1);
			ASSERT(xfs_bmbt_disk_get_startoff(ep) +
			       xfs_bmbt_disk_get_blockcount(ep) <=
			       xfs_bmbt_disk_get_startoff(nextp));
			ep = nextp;
		}

		last = *ep;
		i += num_recs;
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
		bno = nextbno;
		/*
		 * If we've reached the end, stop.
		 */
		if (bno == NULLFSBLOCK)
			break;

		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
	}

	return;

error0:
	xfs_warn(mp, "%s: at error0", __func__);
	if (bp_release)
		xfs_trans_brelse(NULL, bp);
error_norelse:
	xfs_warn(mp, "%s: BAD after btree leaves for %d extents",
		__func__, i);
	panic("%s: CORRUPTED BTREE OR SOMETHING", __func__);
	return;
}
/*
 * Add bmap trace insert entries for all the contents of the extent records.
 */
STATIC void
xfs_bmap_trace_exlist(
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_extnum_t	cnt,		/* count of entries in the list */
	int		whichfork,	/* data or attr or cow fork */
	unsigned long	caller_ip)
{
	xfs_extnum_t	idx;		/* extent record index */
	xfs_ifork_t	*ifp;		/* inode fork pointer */
	int		state = 0;

	if (whichfork == XFS_ATTR_FORK)
		state |= BMAP_ATTRFORK;
	else if (whichfork == XFS_COW_FORK)
		state |= BMAP_COWFORK;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(cnt == xfs_iext_count(ifp));
	for (idx = 0; idx < cnt; idx++)
		trace_xfs_extlist(ip, idx, state, caller_ip);
}
/*
 * Validate that the bmbt_irecs being returned from bmapi are valid
 * given the caller's original parameters.  Specifically check the
 * ranges of the returned irecs to ensure that they only extend beyond
 * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
 */
STATIC void
xfs_bmap_validate_ret(
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	int			flags,
	xfs_bmbt_irec_t		*mval,
	int			nmap,
	int			ret_nmap)
{
	int			i;		/* index to map values */

	ASSERT(ret_nmap <= nmap);

	for (i = 0; i < ret_nmap; i++) {
		ASSERT(mval[i].br_blockcount > 0);
		if (!(flags & XFS_BMAPI_ENTIRE)) {
			ASSERT(mval[i].br_startoff >= bno);
			ASSERT(mval[i].br_blockcount <= len);
			ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
			       bno + len);
		} else {
			ASSERT(mval[i].br_startoff < bno + len);
			ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
			       bno);
		}
		ASSERT(i == 0 ||
		       mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
		       mval[i].br_startoff);
		ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
		       mval[i].br_startblock != HOLESTARTBLOCK);
		ASSERT(mval[i].br_state == XFS_EXT_NORM ||
		       mval[i].br_state == XFS_EXT_UNWRITTEN);
	}
}

#else
#define xfs_bmap_check_leaf_extents(cur, ip, whichfork)		do { } while (0)
#define	xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)	do { } while (0)
#endif /* DEBUG */
/*
 * bmap free list manipulation functions
 */

/*
 * Add the extent to the list of extents to be free at transaction end.
 * The list is maintained sorted (by block number).
 */
void
xfs_bmap_add_free(
	struct xfs_mount		*mp,
	struct xfs_defer_ops		*dfops,
	xfs_fsblock_t			bno,
	xfs_filblks_t			len,
	struct xfs_owner_info		*oinfo)
{
	struct xfs_extent_free_item	*new;		/* new element */
#ifdef DEBUG
	xfs_agnumber_t			agno;
	xfs_agblock_t			agbno;

	ASSERT(bno != NULLFSBLOCK);
	ASSERT(len > 0);
	ASSERT(len <= MAXEXTLEN);
	ASSERT(!isnullstartblock(bno));
	agno = XFS_FSB_TO_AGNO(mp, bno);
	agbno = XFS_FSB_TO_AGBNO(mp, bno);
	ASSERT(agno < mp->m_sb.sb_agcount);
	ASSERT(agbno < mp->m_sb.sb_agblocks);
	ASSERT(len < mp->m_sb.sb_agblocks);
	ASSERT(agbno + len <= mp->m_sb.sb_agblocks);
#endif
	ASSERT(xfs_bmap_free_item_zone != NULL);

	new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);
	new->xefi_startblock = bno;
	new->xefi_blockcount = (xfs_extlen_t)len;
	if (oinfo)
		new->xefi_oinfo = *oinfo;
	else
		xfs_rmap_skip_owner_update(&new->xefi_oinfo);
	trace_xfs_bmap_free_defer(mp, XFS_FSB_TO_AGNO(mp, bno), 0,
			XFS_FSB_TO_AGBNO(mp, bno), len);
	xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_FREE, &new->xefi_list);
}
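/*
 * Note that nothing is freed here directly: the new item is only
 * queued on the deferred-operations list via xfs_defer_add(), and the
 * extent is actually returned to the free space btrees when the
 * caller processes that list (see the xfs_defer_finish() call in
 * xfs_bmap_add_attrfork() below for an example of the pattern).
 */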
/*
 * Inode fork format manipulation functions
 */

/*
 * Transform a btree format file with only one leaf node, where the
 * extents list will fit in the inode, into an extents format file.
 * Since the file extents are already in-core, all we have to do is
 * give up the space for the btree root and pitch the leaf block.
 */
STATIC int				/* error */
xfs_bmap_btree_to_extents(
	xfs_trans_t		*tp,	/* transaction pointer */
	xfs_inode_t		*ip,	/* incore inode pointer */
	xfs_btree_cur_t		*cur,	/* btree cursor */
	int			*logflagsp, /* inode logging flags */
	int			whichfork)  /* data or attr fork */
{
	/* REFERENCED */
	struct xfs_btree_block	*cblock;/* child btree block */
	xfs_fsblock_t		cbno;	/* child block number */
	xfs_buf_t		*cbp;	/* child block's buffer */
	int			error;	/* error return value */
	xfs_ifork_t		*ifp;	/* inode fork data */
	xfs_mount_t		*mp;	/* mount point structure */
	__be64			*pp;	/* ptr to block address */
	struct xfs_btree_block	*rblock;/* root btree block */
	struct xfs_owner_info	oinfo;

	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(whichfork != XFS_COW_FORK);
	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
	rblock = ifp->if_broot;
	ASSERT(be16_to_cpu(rblock->bb_level) == 1);
	ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
	ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes);
	cbno = be64_to_cpu(*pp);
	*logflagsp = 0;
#ifdef DEBUG
	if ((error = xfs_btree_check_lptr(cur, cbno, 1)))
		return error;
#endif
	error = xfs_btree_read_bufl(mp, tp, cbno, 0, &cbp, XFS_BMAP_BTREE_REF,
				&xfs_bmbt_buf_ops);
	if (error)
		return error;
	cblock = XFS_BUF_TO_BLOCK(cbp);
	if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
		return error;
	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
	xfs_bmap_add_free(mp, cur->bc_private.b.dfops, cbno, 1, &oinfo);
	ip->i_d.di_nblocks--;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
	xfs_trans_binval(tp, cbp);
	if (cur->bc_bufs[0] == cbp)
		cur->bc_bufs[0] = NULL;
	xfs_iroot_realloc(ip, -1, whichfork);
	ASSERT(ifp->if_broot == NULL);
	ASSERT((ifp->if_flags & XFS_IFBROOT) == 0);
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
	*logflagsp = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
	return 0;
}
/*
 * Convert an extents-format file into a btree-format file.
 * The new file will have a root block (in the inode) and a single child block.
 */
STATIC int					/* error */
xfs_bmap_extents_to_btree(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	xfs_fsblock_t		*firstblock,	/* first-block-allocated */
	struct xfs_defer_ops	*dfops,		/* blocks freed in xaction */
	xfs_btree_cur_t		**curp,		/* cursor returned to caller */
	int			wasdel,		/* converting a delayed alloc */
	int			*logflagsp,	/* inode logging flags */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_btree_block	*ablock;	/* allocated (child) bt block */
	xfs_buf_t		*abp;		/* buffer for ablock */
	xfs_alloc_arg_t		args;		/* allocation arguments */
	xfs_bmbt_rec_t		*arp;		/* child record pointer */
	struct xfs_btree_block	*block;		/* btree root block */
	xfs_btree_cur_t		*cur;		/* bmap btree cursor */
	xfs_bmbt_rec_host_t	*ep;		/* extent record pointer */
	int			error;		/* error return value */
	xfs_extnum_t		i, cnt;		/* extent record index */
	xfs_ifork_t		*ifp;		/* inode fork pointer */
	xfs_bmbt_key_t		*kp;		/* root block key pointer */
	xfs_mount_t		*mp;		/* mount structure */
	xfs_extnum_t		nextents;	/* number of file extents */
	xfs_bmbt_ptr_t		*pp;		/* root block address pointer */

	mp = ip->i_mount;
	ASSERT(whichfork != XFS_COW_FORK);
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS);

	/*
	 * Make space in the inode incore.
	 */
	xfs_iroot_realloc(ip, 1, whichfork);
	ifp->if_flags |= XFS_IFBROOT;

	/*
	 * Fill in the root.
	 */
	block = ifp->if_broot;
	xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL,
				 XFS_BTNUM_BMAP, 1, 1, ip->i_ino,
				 XFS_BTREE_LONG_PTRS);
	/*
	 * Need a cursor.  Can't allocate until bb_level is filled in.
	 */
	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
	cur->bc_private.b.firstblock = *firstblock;
	cur->bc_private.b.dfops = dfops;
	cur->bc_private.b.flags = wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
	/*
	 * Convert to a btree with two levels, one record in root.
	 */
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_BTREE);
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = mp;
	xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, whichfork);
	args.firstblock = *firstblock;
	if (*firstblock == NULLFSBLOCK) {
		args.type = XFS_ALLOCTYPE_START_BNO;
		args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
	} else if (dfops->dop_low) {
		args.type = XFS_ALLOCTYPE_START_BNO;
		args.fsbno = *firstblock;
	} else {
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
		args.fsbno = *firstblock;
	}
	args.minlen = args.maxlen = args.prod = 1;
	args.wasdel = wasdel;
	*logflagsp = 0;
	if ((error = xfs_alloc_vextent(&args))) {
		xfs_iroot_realloc(ip, -1, whichfork);
		xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
		return error;
	}

	if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
		xfs_iroot_realloc(ip, -1, whichfork);
		xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
		return -ENOSPC;
	}
	/*
	 * Allocation can't fail, the space was reserved.
	 */
	ASSERT(*firstblock == NULLFSBLOCK ||
	       args.agno >= XFS_FSB_TO_AGNO(mp, *firstblock));
	*firstblock = cur->bc_private.b.firstblock = args.fsbno;
	cur->bc_private.b.allocated++;
	ip->i_d.di_nblocks++;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
	abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0);
	/*
	 * Fill in the child block.
	 */
	abp->b_ops = &xfs_bmbt_buf_ops;
	ablock = XFS_BUF_TO_BLOCK(abp);
	xfs_btree_init_block_int(mp, ablock, abp->b_bn,
				XFS_BTNUM_BMAP, 0, 0, ip->i_ino,
				XFS_BTREE_LONG_PTRS);

	arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
	nextents = xfs_iext_count(ifp);
	for (cnt = i = 0; i < nextents; i++) {
		ep = xfs_iext_get_ext(ifp, i);
		if (!isnullstartblock(xfs_bmbt_get_startblock(ep))) {
			arp->l0 = cpu_to_be64(ep->l0);
			arp->l1 = cpu_to_be64(ep->l1);
			arp++; cnt++;
		}
	}
	ASSERT(cnt == XFS_IFORK_NEXTENTS(ip, whichfork));
	xfs_btree_set_numrecs(ablock, cnt);

	/*
	 * Fill in the root key and pointer.
	 */
	kp = XFS_BMBT_KEY_ADDR(mp, block, 1);
	arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
	kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
	pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
						be16_to_cpu(block->bb_level)));
	*pp = cpu_to_be64(args.fsbno);

	/*
	 * Do all this logging at the end so that
	 * the root is at the right level.
	 */
	xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS);
	xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
	ASSERT(*curp == NULL);
	*curp = cur;
	*logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
	return 0;
}
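/*
 * The result above is deliberately the smallest legal bmbt: a level-1
 * root held in the inode fork with a single key/pointer, and one
 * allocated leaf holding every real (non-delalloc) incore extent
 * record.  Later record insertions grow the tree through the normal
 * btree split paths rather than through this function.
 */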
/*
 * Convert a local file to an extents file.
 * This code is out of bounds for data forks of regular files,
 * since the file data needs to get logged so things will stay consistent.
 * (The bmap-level manipulations are ok, though).
 */
void
xfs_bmap_local_to_extents_empty(
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);

	ASSERT(whichfork != XFS_COW_FORK);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
	ASSERT(ifp->if_bytes == 0);
	ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0);

	xfs_bmap_forkoff_reset(ip, whichfork);
	ifp->if_flags &= ~XFS_IFINLINE;
	ifp->if_flags |= XFS_IFEXTENTS;
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
}
STATIC int				/* error */
xfs_bmap_local_to_extents(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_fsblock_t	*firstblock,	/* first block allocated in xaction */
	xfs_extlen_t	total,		/* total blocks needed by transaction */
	int		*logflagsp,	/* inode logging flags */
	int		whichfork,
	void		(*init_fn)(struct xfs_trans *tp,
				   struct xfs_buf *bp,
				   struct xfs_inode *ip,
				   struct xfs_ifork *ifp))
{
	int		error = 0;
	int		flags;		/* logging flags returned */
	xfs_ifork_t	*ifp;		/* inode fork pointer */
	xfs_alloc_arg_t	args;		/* allocation arguments */
	xfs_buf_t	*bp;		/* buffer for extent block */
	xfs_bmbt_rec_host_t *ep;	/* extent record pointer */

	/*
	 * We don't want to deal with the case of keeping inode data inline yet.
	 * So sending the data fork of a regular inode is invalid.
	 */
	ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK));
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);

	if (!ifp->if_bytes) {
		xfs_bmap_local_to_extents_empty(ip, whichfork);
		flags = XFS_ILOG_CORE;
		goto done;
	}

	flags = 0;
	error = 0;
	ASSERT((ifp->if_flags & (XFS_IFINLINE|XFS_IFEXTENTS|XFS_IFEXTIREC)) ==
								XFS_IFINLINE);
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = ip->i_mount;
	xfs_rmap_ino_owner(&args.oinfo, ip->i_ino, whichfork, 0);
	args.firstblock = *firstblock;
	/*
	 * Allocate a block.  We know we need only one, since the
	 * file currently fits in an inode.
	 */
	if (*firstblock == NULLFSBLOCK) {
		args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino);
		args.type = XFS_ALLOCTYPE_START_BNO;
	} else {
		args.fsbno = *firstblock;
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
	}
	args.total = total;
	args.minlen = args.maxlen = args.prod = 1;
	error = xfs_alloc_vextent(&args);
	if (error)
		goto done;

	/* Can't fail, the space was reserved. */
	ASSERT(args.fsbno != NULLFSBLOCK);
	ASSERT(args.len == 1);
	*firstblock = args.fsbno;
	bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0);

	/*
	 * Initialize the block, copy the data and log the remote buffer.
	 *
	 * The callout is responsible for logging because the remote format
	 * might differ from the local format and thus we don't know how much to
	 * log here. Note that init_fn must also set the buffer log item type
	 * correctly.
	 */
	init_fn(tp, bp, ip, ifp);

	/* account for the change in fork size */
	xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
	xfs_bmap_local_to_extents_empty(ip, whichfork);
	flags |= XFS_ILOG_CORE;

	xfs_iext_add(ifp, 0, 1);
	ep = xfs_iext_get_ext(ifp, 0);
	xfs_bmbt_set_allf(ep, 0, args.fsbno, 1, XFS_EXT_NORM);
	trace_xfs_bmap_post_update(ip, 0,
			whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0,
			_THIS_IP_);
	XFS_IFORK_NEXT_SET(ip, whichfork, 1);
	ip->i_d.di_nblocks = 1;
	xfs_trans_mod_dquot_byino(tp, ip,
		XFS_TRANS_DQ_BCOUNT, 1L);
	flags |= xfs_ilog_fext(whichfork);

done:
	*logflagsp = flags;
	return error;
}
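/*
 * A typical init_fn here is xfs_symlink_local_to_remote(), which
 * copies the inline symlink target into the freshly allocated block
 * and logs it (see the S_ISLNK path in xfs_bmap_add_attrfork_local()
 * below).  Directories never come through this function at all; their
 * conversion is handled wholesale by xfs_dir2_sf_to_block().
 */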
/*
 * Called from xfs_bmap_add_attrfork to handle btree format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_btree(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	xfs_fsblock_t		*firstblock,	/* first block allocated */
	struct xfs_defer_ops	*dfops,		/* blocks to free at commit */
	int			*flags)		/* inode logging flags */
{
	xfs_btree_cur_t		*cur;		/* btree cursor */
	int			error;		/* error return value */
	xfs_mount_t		*mp;		/* file system mount struct */
	int			stat;		/* newroot status */

	mp = ip->i_mount;
	if (ip->i_df.if_broot_bytes <= XFS_IFORK_DSIZE(ip))
		*flags |= XFS_ILOG_DBROOT;
	else {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
		cur->bc_private.b.dfops = dfops;
		cur->bc_private.b.firstblock = *firstblock;
		if ((error = xfs_bmbt_lookup_ge(cur, 0, 0, 0, &stat)))
			goto error0;
		/* must be at least one entry */
		XFS_WANT_CORRUPTED_GOTO(mp, stat == 1, error0);
		if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
			goto error0;
		if (stat == 0) {
			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
			return -ENOSPC;
		}
		*firstblock = cur->bc_private.b.firstblock;
		cur->bc_private.b.allocated = 0;
		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	}
	return 0;
error0:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}
/*
 * Called from xfs_bmap_add_attrfork to handle extents format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_extents(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	xfs_fsblock_t		*firstblock,	/* first block allocated */
	struct xfs_defer_ops	*dfops,		/* blocks to free at commit */
	int			*flags)		/* inode logging flags */
{
	xfs_btree_cur_t		*cur;		/* bmap btree cursor */
	int			error;		/* error return value */

	if (ip->i_d.di_nextents * sizeof(xfs_bmbt_rec_t) <= XFS_IFORK_DSIZE(ip))
		return 0;
	cur = NULL;
	error = xfs_bmap_extents_to_btree(tp, ip, firstblock, dfops, &cur, 0,
		flags, XFS_DATA_FORK);
	if (cur) {
		cur->bc_private.b.allocated = 0;
		xfs_btree_del_cursor(cur,
			error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
	}
	return error;
}
/*
 * Called from xfs_bmap_add_attrfork to handle local format files. Each
 * different data fork content type needs a different callout to do the
 * conversion. Some are basic and only require special block initialisation
 * callouts for the data formatting, others (directories) are so specialised
 * they handle everything themselves.
 *
 * XXX (dgc): investigate whether directory conversion can use the generic
 * formatting callout. It should be possible - it's just a very complex
 * formatter.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_local(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	xfs_fsblock_t		*firstblock,	/* first block allocated */
	struct xfs_defer_ops	*dfops,		/* blocks to free at commit */
	int			*flags)		/* inode logging flags */
{
	xfs_da_args_t		dargs;		/* args for dir/attr code */

	if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip))
		return 0;

	if (S_ISDIR(VFS_I(ip)->i_mode)) {
		memset(&dargs, 0, sizeof(dargs));
		dargs.geo = ip->i_mount->m_dir_geo;
		dargs.dp = ip;
		dargs.firstblock = firstblock;
		dargs.dfops = dfops;
		dargs.total = dargs.geo->fsbcount;
		dargs.whichfork = XFS_DATA_FORK;
		dargs.trans = tp;
		return xfs_dir2_sf_to_block(&dargs);
	}

	if (S_ISLNK(VFS_I(ip)->i_mode))
		return xfs_bmap_local_to_extents(tp, ip, firstblock, 1,
						 flags, XFS_DATA_FORK,
						 xfs_symlink_local_to_remote);

	/* should only be called for types that support local format data */
	ASSERT(0);
	return -EFSCORRUPTED;
}
/*
 * Convert inode from non-attributed to attributed.
 * Must not be in a transaction, ip must not be locked.
 */
int						/* error code */
xfs_bmap_add_attrfork(
	xfs_inode_t		*ip,		/* incore inode pointer */
	int			size,		/* space new attribute needs */
	int			rsvd)		/* xact may use reserved blks */
{
	xfs_fsblock_t		firstblock;	/* 1st block/ag allocated */
	struct xfs_defer_ops	dfops;		/* freed extent records */
	xfs_mount_t		*mp;		/* mount structure */
	xfs_trans_t		*tp;		/* transaction pointer */
	int			blks;		/* space reservation */
	int			version = 1;	/* superblock attr version */
	int			logflags;	/* logging flags */
	int			error;		/* error return value */

	ASSERT(XFS_IFORK_Q(ip) == 0);

	mp = ip->i_mount;
	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));

	blks = XFS_ADDAFORK_SPACE_RES(mp);

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_addafork, blks, 0,
			rsvd ? XFS_TRANS_RESERVE : 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_trans_reserve_quota_nblks(tp, ip, blks, 0, rsvd ?
			XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
			XFS_QMOPT_RES_REGBLKS);
	if (error)
		goto trans_cancel;
	if (XFS_IFORK_Q(ip))
		goto trans_cancel;
	if (ip->i_d.di_anextents != 0) {
		error = -EFSCORRUPTED;
		goto trans_cancel;
	}
	if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) {
		/*
		 * For inodes coming from pre-6.2 filesystems.
		 */
		ASSERT(ip->i_d.di_aformat == 0);
		ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
	}

	xfs_trans_ijoin(tp, ip, 0);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_DEV:
		ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
		break;
	case XFS_DINODE_FMT_UUID:
		ip->i_d.di_forkoff = roundup(sizeof(uuid_t), 8) >> 3;
		break;
	case XFS_DINODE_FMT_LOCAL:
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
		if (!ip->i_d.di_forkoff)
			ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
		else if (mp->m_flags & XFS_MOUNT_ATTR2)
			version = 2;
		break;
	default:
		ASSERT(0);
		error = -EINVAL;
		goto trans_cancel;
	}

	ASSERT(ip->i_afp == NULL);
	ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
	ip->i_afp->if_flags = XFS_IFEXTENTS;
	logflags = 0;
	xfs_defer_init(&dfops, &firstblock);
	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_LOCAL:
		error = xfs_bmap_add_attrfork_local(tp, ip, &firstblock, &dfops,
			&logflags);
		break;
	case XFS_DINODE_FMT_EXTENTS:
		error = xfs_bmap_add_attrfork_extents(tp, ip, &firstblock,
			&dfops, &logflags);
		break;
	case XFS_DINODE_FMT_BTREE:
		error = xfs_bmap_add_attrfork_btree(tp, ip, &firstblock, &dfops,
			&logflags);
		break;
	default:
		error = 0;
		break;
	}
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	if (error)
		goto bmap_cancel;
	if (!xfs_sb_version_hasattr(&mp->m_sb) ||
	   (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) {
		bool log_sb = false;

		spin_lock(&mp->m_sb_lock);
		if (!xfs_sb_version_hasattr(&mp->m_sb)) {
			xfs_sb_version_addattr(&mp->m_sb);
			log_sb = true;
		}
		if (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2) {
			xfs_sb_version_addattr2(&mp->m_sb);
			log_sb = true;
		}
		spin_unlock(&mp->m_sb_lock);
		if (log_sb)
			xfs_log_sb(tp);
	}

	error = xfs_defer_finish(&tp, &dfops, NULL);
	if (error)
		goto bmap_cancel;
	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

bmap_cancel:
	xfs_defer_cancel(&dfops);
trans_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
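/*
 * Summary of the flow above: reserve a transaction plus quota blocks,
 * re-check under the inode lock that no attr fork appeared in the
 * meantime, pick di_forkoff from the data fork format, allocate the
 * incore attr fork, then dispatch on di_format to one of the three
 * xfs_bmap_add_attrfork_* helpers.  The superblock attr/attr2 feature
 * bits are set here as well, which is why "version" is tracked.
 */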
/*
 * Internal and external extent tree search functions.
 */

/*
 * Read in the extents to if_extents.
 * All inode fields are set up by caller, we just traverse the btree
 * and copy the records in. If the file system cannot contain unwritten
 * extents, the records are checked for no "state" flags.
 */
int					/* error */
xfs_bmap_read_extents(
	xfs_trans_t		*tp,	/* transaction pointer */
	xfs_inode_t		*ip,	/* incore inode */
	int			whichfork) /* data or attr fork */
{
	struct xfs_btree_block	*block;	/* current btree block */
	xfs_fsblock_t		bno;	/* block # of "block" */
	xfs_buf_t		*bp;	/* buffer for "block" */
	int			error;	/* error return value */
	xfs_extnum_t		i, j;	/* index into the extents list */
	xfs_ifork_t		*ifp;	/* fork structure */
	int			level;	/* btree level, for checking */
	xfs_mount_t		*mp;	/* file system mount structure */
	__be64			*pp;	/* pointer to block address */
	/* REFERENCED */
	xfs_extnum_t		room;	/* number of entries there's room for */

	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	block = ifp->if_broot;
	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);

	/*
	 * Go down the tree until leaf level is reached, following the first
	 * pointer (leftmost) at each level.
	 */
	while (level-- > 0) {
		error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
				XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
		if (error)
			return error;
		block = XFS_BUF_TO_BLOCK(bp);
		if (level == 0)
			break;
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		XFS_WANT_CORRUPTED_GOTO(mp,
			XFS_FSB_SANITY_CHECK(mp, bno), error0);
		xfs_trans_brelse(tp, bp);
	}
	/*
	 * Here with bp and block set to the leftmost leaf node in the tree.
	 */
	room = xfs_iext_count(ifp);
	i = 0;
	/*
	 * Loop over all leaf nodes.  Copy information to the extent records.
	 */
	for (;;) {
		xfs_bmbt_rec_t	*frp;
		xfs_fsblock_t	nextbno;
		xfs_extnum_t	num_recs;

		num_recs = xfs_btree_get_numrecs(block);
		if (unlikely(i + num_recs > room)) {
			ASSERT(i + num_recs <= room);
			xfs_warn(ip->i_mount,
				"corrupt dinode %Lu, (btree extents).",
				(unsigned long long) ip->i_ino);
			XFS_CORRUPTION_ERROR("xfs_bmap_read_extents(1)",
				XFS_ERRLEVEL_LOW, ip->i_mount, block);
			goto error0;
		}
		/*
		 * Read-ahead the next leaf block, if any.
		 */
		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
		if (nextbno != NULLFSBLOCK)
			xfs_btree_reada_bufl(mp, nextbno, 1,
					     &xfs_bmbt_buf_ops);
		/*
		 * Copy records into the extent records.
		 */
		frp = XFS_BMBT_REC_ADDR(mp, block, 1);
		for (j = 0; j < num_recs; j++, i++, frp++) {
			xfs_bmbt_rec_host_t *trp = xfs_iext_get_ext(ifp, i);
			trp->l0 = be64_to_cpu(frp->l0);
			trp->l1 = be64_to_cpu(frp->l1);
			if (!xfs_bmbt_validate_extent(mp, whichfork, trp)) {
				XFS_ERROR_REPORT("xfs_bmap_read_extents(2)",
						 XFS_ERRLEVEL_LOW, mp);
				goto error0;
			}
		}
		xfs_trans_brelse(tp, bp);
		bno = nextbno;
		/*
		 * If we've reached the end, stop.
		 */
		if (bno == NULLFSBLOCK)
			break;
		error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
				XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
		if (error)
			return error;
		block = XFS_BUF_TO_BLOCK(bp);
	}
	if (i != XFS_IFORK_NEXTENTS(ip, whichfork))
		return -EFSCORRUPTED;
	ASSERT(i == xfs_iext_count(ifp));
	XFS_BMAP_TRACE_EXLIST(ip, i, whichfork);
	return 0;
error0:
	xfs_trans_brelse(tp, bp);
	return -EFSCORRUPTED;
}
/*
 * Returns the file-relative block number of the first unused block(s)
 * in the file with at least "len" logically contiguous blocks free.
 * This is the lowest-address hole if the file has holes, else the first block
 * past the end of file.
 * Return 0 if the file is currently local (in-inode).
 */
int						/* error */
xfs_bmap_first_unused(
	xfs_trans_t	*tp,			/* transaction pointer */
	xfs_inode_t	*ip,			/* incore inode */
	xfs_extlen_t	len,			/* size of hole to find */
	xfs_fileoff_t	*first_unused,		/* unused block */
	int		whichfork)		/* data or attr fork */
{
	int		error;			/* error return value */
	int		idx;			/* extent record index */
	xfs_ifork_t	*ifp;			/* inode fork pointer */
	xfs_fileoff_t	lastaddr;		/* last block number seen */
	xfs_fileoff_t	lowest;			/* lowest useful block */
	xfs_fileoff_t	max;			/* starting useful block */
	xfs_fileoff_t	off;			/* offset for this block */
	xfs_extnum_t	nextents;		/* number of extent entries */

	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE ||
	       XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ||
	       XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
		*first_unused = 0;
		return 0;
	}
	ifp = XFS_IFORK_PTR(ip, whichfork);
	if (!(ifp->if_flags & XFS_IFEXTENTS) &&
	    (error = xfs_iread_extents(tp, ip, whichfork)))
		return error;
	lowest = *first_unused;
	nextents = xfs_iext_count(ifp);
	for (idx = 0, lastaddr = 0, max = lowest; idx < nextents; idx++) {
		xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, idx);
		off = xfs_bmbt_get_startoff(ep);
		/*
		 * See if the hole before this extent will work.
		 */
		if (off >= lowest + len && off - max >= len) {
			*first_unused = max;
			return 0;
		}
		lastaddr = off + xfs_bmbt_get_blockcount(ep);
		max = XFS_FILEOFF_MAX(lastaddr, lowest);
	}
	*first_unused = max;
	return 0;
}
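/*
 * Worked example (illustrative only): with extents covering [5,10)
 * and [20,25), len = 4 and *first_unused = 0 on entry, the hole
 * before the first extent already satisfies off - max = 5 >= 4, so 0
 * is returned.  With len = 16, both holes are too small and the scan
 * falls off the end, returning max = 25, the first block past EOF.
 */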
/*
 * Returns the file-relative block number of the last block - 1 before
 * last_block (input value) in the file.
 * This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int						/* error */
xfs_bmap_last_before(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_fileoff_t		*last_block,	/* last block */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_bmbt_irec	got;
	xfs_extnum_t		idx;
	int			error;

	switch (XFS_IFORK_FORMAT(ip, whichfork)) {
	case XFS_DINODE_FMT_LOCAL:
		*last_block = 0;
		return 0;
	case XFS_DINODE_FMT_BTREE:
	case XFS_DINODE_FMT_EXTENTS:
		break;
	default:
		return -EIO;
	}

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	if (xfs_iext_lookup_extent(ip, ifp, *last_block - 1, &idx, &got)) {
		if (got.br_startoff <= *last_block - 1)
			return 0;
	}

	if (xfs_iext_get_extent(ifp, idx - 1, &got)) {
		*last_block = got.br_startoff + got.br_blockcount;
		return 0;
	}

	*last_block = 0;
	return 0;
}
STATIC int
xfs_bmap_last_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*rec,
	int			*is_empty)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	int			error;
	int			nextents;

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	nextents = xfs_iext_count(ifp);
	if (nextents == 0) {
		*is_empty = 1;
		return 0;
	}

	xfs_bmbt_get_all(xfs_iext_get_ext(ifp, nextents - 1), rec);
	*is_empty = 0;
	return 0;
}
/*
 * Check the last inode extent to determine whether this allocation will result
 * in blocks being allocated at the end of the file. When we allocate new data
 * blocks at the end of the file which do not start at the previous data block,
 * we will try to align the new blocks at stripe unit boundaries.
 *
 * Returns 1 in bma->aeof if the file (fork) is empty as any new write will be
 * at, or past the EOF.
 */
STATIC int
xfs_bmap_isaeof(
	struct xfs_bmalloca	*bma,
	int			whichfork)
{
	struct xfs_bmbt_irec	rec;
	int			is_empty;
	int			error;

	bma->aeof = 0;
	error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
				     &is_empty);
	if (error)
		return error;

	if (is_empty) {
		bma->aeof = 1;
		return 0;
	}

	/*
	 * Check if we are allocating at or past the last extent, or at least
	 * into the last delayed allocated extent.
	 */
	bma->aeof = bma->offset >= rec.br_startoff + rec.br_blockcount ||
		(bma->offset >= rec.br_startoff &&
		 isnullstartblock(rec.br_startblock));
	return 0;
}
/*
 * Returns the file-relative block number of the first block past eof in
 * the file.  This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int
xfs_bmap_last_offset(
	struct xfs_inode	*ip,
	xfs_fileoff_t		*last_block,
	int			whichfork)
{
	struct xfs_bmbt_irec	rec;
	int			is_empty;
	int			error;

	*last_block = 0;

	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL)
		return 0;

	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
	    XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
		return -EIO;

	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty);
	if (error || is_empty)
		return error;

	*last_block = rec.br_startoff + rec.br_blockcount;
	return 0;
}
/*
 * Returns whether the selected fork of the inode has exactly one
 * block or not.  For the data fork we check this matches di_size,
 * implying the file's range is 0..bsize-1.
 */
int					/* 1=>1 block, 0=>otherwise */
xfs_bmap_one_block(
	xfs_inode_t	*ip,		/* incore inode */
	int		whichfork)	/* data or attr fork */
{
	xfs_bmbt_rec_host_t *ep;	/* ptr to fork's extent */
	xfs_ifork_t	*ifp;		/* inode fork pointer */
	int		rval;		/* return value */
	xfs_bmbt_irec_t	s;		/* internal version of extent */

#ifndef DEBUG
	if (whichfork == XFS_DATA_FORK)
		return XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize;
#endif	/* !DEBUG */
	if (XFS_IFORK_NEXTENTS(ip, whichfork) != 1)
		return 0;
	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
		return 0;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
	ep = xfs_iext_get_ext(ifp, 0);
	xfs_bmbt_get_all(ep, &s);
	rval = s.br_startoff == 0 && s.br_blockcount == 1;
	if (rval && whichfork == XFS_DATA_FORK)
		ASSERT(XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize);
	return rval;
}
/*
 * Extent tree manipulation functions used during allocation.
 */

/*
 * Convert a delayed allocation to a real allocation.
 */
STATIC int				/* error */
xfs_bmap_add_extent_delay_real(
	struct xfs_bmalloca	*bma,
	int			whichfork)
{
	struct xfs_bmbt_irec	*new = &bma->got;
	int			diff;	/* temp value */
	xfs_bmbt_rec_host_t	*ep;	/* extent entry for idx */
	int			error;	/* error return value */
	int			i;	/* temp state */
	xfs_ifork_t		*ifp;	/* inode fork pointer */
	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
					/* left is 0, right is 1, prev is 2 */
	int			rval=0;	/* return value (logging flags) */
	int			state = 0;/* state bits, accessed thru macros */
	xfs_filblks_t		da_new; /* new count del alloc blocks used */
	xfs_filblks_t		da_old; /* old count del alloc blocks used */
	xfs_filblks_t		temp=0;	/* value for da_new calculations */
	xfs_filblks_t		temp2=0;/* value for da_new calculations */
	int			tmp_rval;	/* partial logging flags */
	struct xfs_mount	*mp;
	xfs_extnum_t		*nextents;

	mp = bma->ip->i_mount;
	ifp = XFS_IFORK_PTR(bma->ip, whichfork);
	ASSERT(whichfork != XFS_ATTR_FORK);
	nextents = (whichfork == XFS_COW_FORK ? &bma->ip->i_cnextents :
						&bma->ip->i_d.di_nextents);

	ASSERT(bma->idx >= 0);
	ASSERT(bma->idx <= xfs_iext_count(ifp));
	ASSERT(!isnullstartblock(new->br_startblock));
	ASSERT(!bma->cur ||
	       (bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));

	XFS_STATS_INC(mp, xs_add_exlist);

#define	LEFT		r[0]
#define	RIGHT		r[1]
#define	PREV		r[2]

	if (whichfork == XFS_COW_FORK)
		state |= BMAP_COWFORK;

	/*
	 * Set up a bunch of variables to make the tests simpler.
	 */
	ep = xfs_iext_get_ext(ifp, bma->idx);
	xfs_bmbt_get_all(ep, &PREV);
	new_endoff = new->br_startoff + new->br_blockcount;
	ASSERT(PREV.br_startoff <= new->br_startoff);
	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);

	da_old = startblockval(PREV.br_startblock);
	da_new = 0;
	/*
	 * Set flags determining what part of the previous delayed allocation
	 * extent is being replaced by a real allocation.
	 */
	if (PREV.br_startoff == new->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
		state |= BMAP_RIGHT_FILLING;

	/*
	 * Check and set flags if this segment has a left neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 */
	if (bma->idx > 0) {
		state |= BMAP_LEFT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1), &LEFT);

		if (isnullstartblock(LEFT.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
	    LEFT.br_state == new->br_state &&
	    LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
		state |= BMAP_LEFT_CONTIG;

	/*
	 * Check and set flags if this segment has a right neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 * Also check for all-three-contiguous being too large.
	 */
	if (bma->idx < xfs_iext_count(ifp) - 1) {
		state |= BMAP_RIGHT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx + 1), &RIGHT);

		if (isnullstartblock(RIGHT.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
	    new_endoff == RIGHT.br_startoff &&
	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
	    new->br_state == RIGHT.br_state &&
	    new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING)) !=
		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING) ||
	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
			<= MAXEXTLEN))
		state |= BMAP_RIGHT_CONTIG;
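	/*
	 * The four FILLING/CONTIG flags checked below encode sixteen
	 * combinations, but only the ones listed as cases can occur: a
	 * neighbor can only be contiguous when the corresponding FILLING
	 * bit is set (the new extent must reach that edge of PREV), which
	 * is why the leftover combinations assert as impossible at the
	 * end of the switch.
	 */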
	error = 0;
	/*
	 * Switch out based on the FILLING and CONTIG state bits.
	 */
	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The left and right neighbors are both contiguous with new.
		 */
		bma->idx--;
		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
			LEFT.br_blockcount + PREV.br_blockcount +
			RIGHT.br_blockcount);
		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);

		xfs_iext_remove(bma->ip, bma->idx + 1, 2, state);
		(*nextents)--;
		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
					RIGHT.br_startblock,
					RIGHT.br_blockcount, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_btree_delete(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_btree_decrement(bma->cur, 0, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
					LEFT.br_startblock,
					LEFT.br_blockcount +
					PREV.br_blockcount +
					RIGHT.br_blockcount, LEFT.br_state);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The left neighbor is contiguous, the right is not.
		 */
		bma->idx--;

		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
			LEFT.br_blockcount + PREV.br_blockcount);
		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);

		xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);
		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, LEFT.br_startoff,
					LEFT.br_startblock, LEFT.br_blockcount,
					&i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
					LEFT.br_startblock,
					LEFT.br_blockcount +
					PREV.br_blockcount, LEFT.br_state);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The right neighbor is contiguous, the left is not.
		 */
		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
		xfs_bmbt_set_startblock(ep, new->br_startblock);
		xfs_bmbt_set_blockcount(ep,
			PREV.br_blockcount + RIGHT.br_blockcount);
		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);

		xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);
		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
					RIGHT.br_startblock,
					RIGHT.br_blockcount, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, PREV.br_startoff,
					new->br_startblock,
					PREV.br_blockcount +
					RIGHT.br_blockcount, PREV.br_state);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * Neither the left nor right neighbors are contiguous with
		 * the new one.
		 */
		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
		xfs_bmbt_set_startblock(ep, new->br_startblock);
		xfs_bmbt_set_state(ep, new->br_state);
		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);

		(*nextents)++;
		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
					new->br_startblock, new->br_blockcount,
					&i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
			bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}
		break;
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Filling in the first part of a previous delayed allocation.
		 * The left neighbor is contiguous.
		 */
		trace_xfs_bmap_pre_update(bma->ip, bma->idx - 1, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx - 1),
			LEFT.br_blockcount + new->br_blockcount);
		xfs_bmbt_set_startoff(ep,
			PREV.br_startoff + new->br_blockcount);
		trace_xfs_bmap_post_update(bma->ip, bma->idx - 1, state, _THIS_IP_);

		temp = PREV.br_blockcount - new->br_blockcount;
		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(ep, temp);
		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, LEFT.br_startoff,
					LEFT.br_startblock, LEFT.br_blockcount,
					&i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
					LEFT.br_startblock,
					LEFT.br_blockcount +
					new->br_blockcount,
					LEFT.br_state);
			if (error)
				goto done;
		}
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock));
		xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);

		bma->idx--;
		break;

	case BMAP_LEFT_FILLING:
		/*
		 * Filling in the first part of a previous delayed allocation.
		 * The left neighbor is not contiguous.
		 */
		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
		xfs_bmbt_set_startoff(ep, new_endoff);
		temp = PREV.br_blockcount - new->br_blockcount;
		xfs_bmbt_set_blockcount(ep, temp);
		xfs_iext_insert(bma->ip, bma->idx, 1, new, state);
		(*nextents)++;
		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
					new->br_startblock, new->br_blockcount,
					&i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
			bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
					bma->firstblock, bma->dfops,
					&bma->cur, 1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock) -
			(bma->cur ? bma->cur->bc_private.b.allocated : 0));
		ep = xfs_iext_get_ext(ifp, bma->idx + 1);
		xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
		trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
		break;
	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in the last part of a previous delayed allocation.
		 * The right neighbor is contiguous with the new allocation.
		 */
		temp = PREV.br_blockcount - new->br_blockcount;
		trace_xfs_bmap_pre_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(ep, temp);
		xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, bma->idx + 1),
			new->br_startoff, new->br_startblock,
			new->br_blockcount + RIGHT.br_blockcount,
			RIGHT.br_state);
		trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
					RIGHT.br_startblock,
					RIGHT.br_blockcount, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, new->br_startoff,
					new->br_startblock,
					new->br_blockcount +
					RIGHT.br_blockcount,
					RIGHT.br_state);
			if (error)
				goto done;
		}

		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock));
		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
		xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);

		bma->idx++;
		break;

	case BMAP_RIGHT_FILLING:
		/*
		 * Filling in the last part of a previous delayed allocation.
		 * The right neighbor is not contiguous.
		 */
		temp = PREV.br_blockcount - new->br_blockcount;
		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(ep, temp);
		xfs_iext_insert(bma->ip, bma->idx + 1, 1, new, state);
		(*nextents)++;
		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
					new->br_startblock, new->br_blockcount,
					&i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
			bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
				bma->firstblock, bma->dfops, &bma->cur, 1,
				&tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock) -
			(bma->cur ? bma->cur->bc_private.b.allocated : 0));
		ep = xfs_iext_get_ext(ifp, bma->idx);
		xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);

		bma->idx++;
		break;
	case 0:
		/*
		 * Filling in the middle part of a previous delayed allocation.
		 * Contiguity is impossible here.
		 * This case is avoided almost all the time.
		 *
		 * We start with a delayed allocation:
		 *
		 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+
		 *  PREV @ idx
		 *
		 * and we are allocating:
		 *                     +rrrrrrrrrrrrrrrrr+
		 *                            new
		 *
		 * and we set it up for insertion as:
		 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+
		 *                            new
		 *  PREV @ idx          LEFT              RIGHT
		 *                      inserted at idx + 1
		 */
		temp = new->br_startoff - PREV.br_startoff;
		temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff;
		trace_xfs_bmap_pre_update(bma->ip, bma->idx, 0, _THIS_IP_);
		xfs_bmbt_set_blockcount(ep, temp);	/* truncate PREV */
		LEFT = *new;
		RIGHT.br_state = PREV.br_state;
		RIGHT.br_startblock = nullstartblock(
				(int)xfs_bmap_worst_indlen(bma->ip, temp2));
		RIGHT.br_startoff = new_endoff;
		RIGHT.br_blockcount = temp2;
		/* insert LEFT (r[0]) and RIGHT (r[1]) at the same time */
		xfs_iext_insert(bma->ip, bma->idx + 1, 2, &LEFT, state);
		(*nextents)++;
		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
					new->br_startblock, new->br_blockcount,
					&i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
			bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
					bma->firstblock, bma->dfops, &bma->cur,
					1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}
		temp = xfs_bmap_worst_indlen(bma->ip, temp);
		temp2 = xfs_bmap_worst_indlen(bma->ip, temp2);
		diff = (int)(temp + temp2 -
			     (startblockval(PREV.br_startblock) -
			      (bma->cur ?
			       bma->cur->bc_private.b.allocated : 0)));
		if (diff > 0) {
			error = xfs_mod_fdblocks(bma->ip->i_mount,
						 -((int64_t)diff), false);
			ASSERT(!error);
			if (error)
				goto done;
		}

		ep = xfs_iext_get_ext(ifp, bma->idx);
		xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
		trace_xfs_bmap_pre_update(bma->ip, bma->idx + 2, state, _THIS_IP_);
		xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, bma->idx + 2),
			nullstartblock((int)temp2));
		trace_xfs_bmap_post_update(bma->ip, bma->idx + 2, state, _THIS_IP_);

		bma->idx++;
		da_new = temp + temp2;
		break;
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_CONTIG:
	case BMAP_RIGHT_CONTIG:
		/*
		 * These cases are all impossible.
		 */
		ASSERT(0);
	}

	/* add reverse mapping */
	error = xfs_rmap_map_extent(mp, bma->dfops, bma->ip, whichfork, new);
	if (error)
		goto done;

	/* convert to a btree if necessary */
	if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
		int	tmp_logflags;	/* partial log flag return val */

		ASSERT(bma->cur == NULL);
		error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
				bma->firstblock, bma->dfops, &bma->cur,
				da_old > 0, &tmp_logflags, whichfork);
		bma->logflags |= tmp_logflags;
		if (error)
			goto done;
	}

	/* adjust for changes in reserved delayed indirect blocks */
	if (da_old || da_new) {
		temp = da_new;
		if (bma->cur)
			temp += bma->cur->bc_private.b.allocated;
		if (temp < da_old)
			xfs_mod_fdblocks(bma->ip->i_mount,
					(int64_t)(da_old - temp), false);
	}

	/* clear out the allocated field, done with it now in any case. */
	if (bma->cur)
		bma->cur->bc_private.b.allocated = 0;

	xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
done:
	if (whichfork != XFS_COW_FORK)
		bma->logflags |= rval;
	return error;
#undef	LEFT
#undef	RIGHT
#undef	PREV
}
2146 * Convert an unwritten allocation to a real allocation or vice versa.
2148 STATIC int /* error */
2149 xfs_bmap_add_extent_unwritten_real(
2150 struct xfs_trans *tp,
2151 xfs_inode_t *ip, /* incore inode pointer */
2153 xfs_extnum_t *idx, /* extent number to update/insert */
2154 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
2155 xfs_bmbt_irec_t *new, /* new data to add to file extents */
2156 xfs_fsblock_t *first, /* pointer to firstblock variable */
2157 struct xfs_defer_ops *dfops, /* list of extents to be freed */
2158 int *logflagsp) /* inode logging flags */
2160 xfs_btree_cur_t *cur; /* btree cursor */
2161 xfs_bmbt_rec_host_t *ep; /* extent entry for idx */
2162 int error; /* error return value */
2163 int i; /* temp state */
2164 xfs_ifork_t *ifp; /* inode fork pointer */
2165 xfs_fileoff_t new_endoff; /* end offset of new entry */
2166 xfs_exntst_t newext; /* new extent state */
2167 xfs_exntst_t oldext; /* old extent state */
2168 xfs_bmbt_irec_t r[3]; /* neighbor extent entries */
2169 /* left is 0, right is 1, prev is 2 */
2170 int rval=0; /* return value (logging flags) */
2171 int state = 0; /* state bits, accessed thru macros */
2172 struct xfs_mount *mp = ip->i_mount;
2177 ifp = XFS_IFORK_PTR(ip, whichfork);
2178 if (whichfork == XFS_COW_FORK)
2179 state |= BMAP_COWFORK;
2182 ASSERT(*idx <= xfs_iext_count(ifp));
2183 ASSERT(!isnullstartblock(new->br_startblock));
2185 XFS_STATS_INC(mp, xs_add_exlist);
2192 * Set up a bunch of variables to make the tests simpler.
2195 ep = xfs_iext_get_ext(ifp, *idx);
2196 xfs_bmbt_get_all(ep, &PREV);
2197 newext = new->br_state;
2198 oldext = (newext == XFS_EXT_UNWRITTEN) ?
2199 XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
2200 ASSERT(PREV.br_state == oldext);
2201 new_endoff = new->br_startoff + new->br_blockcount;
2202 ASSERT(PREV.br_startoff <= new->br_startoff);
2203 ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
2206 * Set flags determining what part of the previous oldext allocation
2207 * extent is being replaced by a newext allocation.
2209 if (PREV.br_startoff == new->br_startoff)
2210 state |= BMAP_LEFT_FILLING;
2211 if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
2212 state |= BMAP_RIGHT_FILLING;
2215 * Check and set flags if this segment has a left neighbor.
2216 * Don't set contiguous if the combined extent would be too large.
2219 state |= BMAP_LEFT_VALID;
2220 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &LEFT);
2222 if (isnullstartblock(LEFT.br_startblock))
2223 state |= BMAP_LEFT_DELAY;
2226 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
2227 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
2228 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
2229 LEFT.br_state == newext &&
2230 LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
2231 state |= BMAP_LEFT_CONTIG;
2234 * Check and set flags if this segment has a right neighbor.
2235 * Don't set contiguous if the combined extent would be too large.
2236 * Also check for all-three-contiguous being too large.
2238 if (*idx < xfs_iext_count(ifp) - 1) {
2239 state |= BMAP_RIGHT_VALID;
2240 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx + 1), &RIGHT);
2241 if (isnullstartblock(RIGHT.br_startblock))
2242 state |= BMAP_RIGHT_DELAY;
2245 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
2246 new_endoff == RIGHT.br_startoff &&
2247 new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
2248 newext == RIGHT.br_state &&
2249 new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
2250 ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
2251 BMAP_RIGHT_FILLING)) !=
2252 (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
2253 BMAP_RIGHT_FILLING) ||
2254 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
2256 state |= BMAP_RIGHT_CONTIG;
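/*
 * Editor's note: the case selection above boils down to four bit tests.
 * A standalone sketch of how they are derived (hypothetical names; MAX_LEN
 * stands in for MAXEXTLEN; left/right must point at valid records whenever
 * have_left/have_right is set):
 */
#include <stdbool.h>
#include <stdint.h>

#define LEFT_FILLING	(1 << 0)
#define RIGHT_FILLING	(1 << 1)
#define LEFT_CONTIG	(1 << 2)
#define RIGHT_CONTIG	(1 << 3)
#define MAX_LEN		((uint64_t)0x1fffff)

struct irec { uint64_t off, bno, len; int state; };

static int classify(const struct irec *left, bool have_left,
		    const struct irec *right, bool have_right,
		    const struct irec *prev, const struct irec *new)
{
	int state = 0;

	if (prev->off == new->off)			/* replaces the front */
		state |= LEFT_FILLING;
	if (prev->off + prev->len == new->off + new->len) /* replaces the back */
		state |= RIGHT_FILLING;

	/* merge left only if adjacent in the file and on disk, same state */
	if (have_left && left->off + left->len == new->off &&
	    left->bno + left->len == new->bno &&
	    left->state == new->state &&
	    left->len + new->len <= MAX_LEN)
		state |= LEFT_CONTIG;

	/* likewise right, also capping a potential three-way merge */
	if (have_right && new->off + new->len == right->off &&
	    new->bno + new->len == right->bno &&
	    new->state == right->state &&
	    new->len + right->len <= MAX_LEN &&
	    ((state & (LEFT_CONTIG | LEFT_FILLING | RIGHT_FILLING)) !=
	     (LEFT_CONTIG | LEFT_FILLING | RIGHT_FILLING) ||
	     left->len + new->len + right->len <= MAX_LEN))
		state |= RIGHT_CONTIG;

	return state;
}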
2259 * Switch out based on the FILLING and CONTIG state bits.
2261 switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
2262 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
2263 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
2264 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2266 * Setting all of a previous oldext extent to newext.
2267 * The left and right neighbors are both contiguous with new.
2271 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2272 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
2273 LEFT.br_blockcount + PREV.br_blockcount +
2274 RIGHT.br_blockcount);
2275 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2277 xfs_iext_remove(ip, *idx + 1, 2, state);
2278 XFS_IFORK_NEXT_SET(ip, whichfork,
2279 XFS_IFORK_NEXTENTS(ip, whichfork) - 2);
2281 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2283 rval = XFS_ILOG_CORE;
2284 if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
2285 RIGHT.br_startblock,
2286 RIGHT.br_blockcount, &i)))
2288 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2289 if ((error = xfs_btree_delete(cur, &i)))
2291 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2292 if ((error = xfs_btree_decrement(cur, 0, &i)))
2294 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2295 if ((error = xfs_btree_delete(cur, &i)))
2297 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2298 if ((error = xfs_btree_decrement(cur, 0, &i)))
2300 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2301 if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
2303 LEFT.br_blockcount + PREV.br_blockcount +
2304 RIGHT.br_blockcount, LEFT.br_state)))
2309 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2311 * Setting all of a previous oldext extent to newext.
2312 * The left neighbor is contiguous, the right is not.
2316 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2317 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
2318 LEFT.br_blockcount + PREV.br_blockcount);
2319 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2321 xfs_iext_remove(ip, *idx + 1, 1, state);
2322 XFS_IFORK_NEXT_SET(ip, whichfork,
2323 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
2325 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2327 rval = XFS_ILOG_CORE;
2328 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2329 PREV.br_startblock, PREV.br_blockcount,
2332 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2333 if ((error = xfs_btree_delete(cur, &i)))
2335 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2336 if ((error = xfs_btree_decrement(cur, 0, &i)))
2338 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2339 if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
2341 LEFT.br_blockcount + PREV.br_blockcount,
2347 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2349 * Setting all of a previous oldext extent to newext.
2350 * The right neighbor is contiguous, the left is not.
2352 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2353 xfs_bmbt_set_blockcount(ep,
2354 PREV.br_blockcount + RIGHT.br_blockcount);
2355 xfs_bmbt_set_state(ep, newext);
2356 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2357 xfs_iext_remove(ip, *idx + 1, 1, state);
2358 XFS_IFORK_NEXT_SET(ip, whichfork,
2359 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
2361 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2363 rval = XFS_ILOG_CORE;
2364 if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
2365 RIGHT.br_startblock,
2366 RIGHT.br_blockcount, &i)))
2368 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2369 if ((error = xfs_btree_delete(cur, &i)))
2371 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2372 if ((error = xfs_btree_decrement(cur, 0, &i)))
2374 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2375 if ((error = xfs_bmbt_update(cur, new->br_startoff,
2377 new->br_blockcount + RIGHT.br_blockcount,
2383 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
2385 * Setting all of a previous oldext extent to newext.
2386 * Neither the left nor right neighbors are contiguous with
2387 * the new one.
2389 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2390 xfs_bmbt_set_state(ep, newext);
2391 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2394 rval = XFS_ILOG_DEXT;
2397 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
2398 new->br_startblock, new->br_blockcount,
2401 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2402 if ((error = xfs_bmbt_update(cur, new->br_startoff,
2403 new->br_startblock, new->br_blockcount,
2409 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
2411 * Setting the first part of a previous oldext extent to newext.
2412 * The left neighbor is contiguous.
2414 trace_xfs_bmap_pre_update(ip, *idx - 1, state, _THIS_IP_);
2415 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx - 1),
2416 LEFT.br_blockcount + new->br_blockcount);
2417 xfs_bmbt_set_startoff(ep,
2418 PREV.br_startoff + new->br_blockcount);
2419 trace_xfs_bmap_post_update(ip, *idx - 1, state, _THIS_IP_);
2421 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2422 xfs_bmbt_set_startblock(ep,
2423 new->br_startblock + new->br_blockcount);
2424 xfs_bmbt_set_blockcount(ep,
2425 PREV.br_blockcount - new->br_blockcount);
2426 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2431 rval = XFS_ILOG_DEXT;
2434 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2435 PREV.br_startblock, PREV.br_blockcount,
2438 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2439 if ((error = xfs_bmbt_update(cur,
2440 PREV.br_startoff + new->br_blockcount,
2441 PREV.br_startblock + new->br_blockcount,
2442 PREV.br_blockcount - new->br_blockcount,
2445 if ((error = xfs_btree_decrement(cur, 0, &i)))
2447 error = xfs_bmbt_update(cur, LEFT.br_startoff,
2449 LEFT.br_blockcount + new->br_blockcount,
2456 case BMAP_LEFT_FILLING:
2458 * Setting the first part of a previous oldext extent to newext.
2459 * The left neighbor is not contiguous.
2461 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2462 ASSERT(ep && xfs_bmbt_get_state(ep) == oldext);
2463 xfs_bmbt_set_startoff(ep, new_endoff);
2464 xfs_bmbt_set_blockcount(ep,
2465 PREV.br_blockcount - new->br_blockcount);
2466 xfs_bmbt_set_startblock(ep,
2467 new->br_startblock + new->br_blockcount);
2468 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2470 xfs_iext_insert(ip, *idx, 1, new, state);
2471 XFS_IFORK_NEXT_SET(ip, whichfork,
2472 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
2474 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2476 rval = XFS_ILOG_CORE;
2477 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2478 PREV.br_startblock, PREV.br_blockcount,
2481 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2482 if ((error = xfs_bmbt_update(cur,
2483 PREV.br_startoff + new->br_blockcount,
2484 PREV.br_startblock + new->br_blockcount,
2485 PREV.br_blockcount - new->br_blockcount,
2488 cur->bc_rec.b = *new;
2489 if ((error = xfs_btree_insert(cur, &i)))
2491 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2495 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2497 * Setting the last part of a previous oldext extent to newext.
2498 * The right neighbor is contiguous with the new allocation.
2500 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2501 xfs_bmbt_set_blockcount(ep,
2502 PREV.br_blockcount - new->br_blockcount);
2503 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2507 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2508 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
2509 new->br_startoff, new->br_startblock,
2510 new->br_blockcount + RIGHT.br_blockcount, newext);
2511 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2514 rval = XFS_ILOG_DEXT;
2517 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2519 PREV.br_blockcount, &i)))
2521 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2522 if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
2524 PREV.br_blockcount - new->br_blockcount,
2527 if ((error = xfs_btree_increment(cur, 0, &i)))
2529 if ((error = xfs_bmbt_update(cur, new->br_startoff,
2531 new->br_blockcount + RIGHT.br_blockcount,
2537 case BMAP_RIGHT_FILLING:
2539 * Setting the last part of a previous oldext extent to newext.
2540 * The right neighbor is not contiguous.
2542 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2543 xfs_bmbt_set_blockcount(ep,
2544 PREV.br_blockcount - new->br_blockcount);
2545 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2548 xfs_iext_insert(ip, *idx, 1, new, state);
2550 XFS_IFORK_NEXT_SET(ip, whichfork,
2551 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
2553 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2555 rval = XFS_ILOG_CORE;
2556 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2557 PREV.br_startblock, PREV.br_blockcount,
2560 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2561 if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
2563 PREV.br_blockcount - new->br_blockcount,
2566 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
2567 new->br_startblock, new->br_blockcount,
2570 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
2571 cur->bc_rec.b.br_state = XFS_EXT_NORM;
2572 if ((error = xfs_btree_insert(cur, &i)))
2574 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2580 * Setting the middle part of a previous oldext extent to
2581 * newext. Contiguity is impossible here.
2582 * One extent becomes three extents.
2584 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2585 xfs_bmbt_set_blockcount(ep,
2586 new->br_startoff - PREV.br_startoff);
2587 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2590 r[1].br_startoff = new_endoff;
2591 r[1].br_blockcount =
2592 PREV.br_startoff + PREV.br_blockcount - new_endoff;
2593 r[1].br_startblock = new->br_startblock + new->br_blockcount;
2594 r[1].br_state = oldext;
2597 xfs_iext_insert(ip, *idx, 2, &r[0], state);
2599 XFS_IFORK_NEXT_SET(ip, whichfork,
2600 XFS_IFORK_NEXTENTS(ip, whichfork) + 2);
2602 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2604 rval = XFS_ILOG_CORE;
2605 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2606 PREV.br_startblock, PREV.br_blockcount,
2609 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2610 /* new right extent - oldext */
2611 if ((error = xfs_bmbt_update(cur, r[1].br_startoff,
2612 r[1].br_startblock, r[1].br_blockcount,
2615 /* new left extent - oldext */
2616 cur->bc_rec.b = PREV;
2617 cur->bc_rec.b.br_blockcount =
2618 new->br_startoff - PREV.br_startoff;
2619 if ((error = xfs_btree_insert(cur, &i)))
2621 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2623 * Reset the cursor to the position of the new extent
2624 * we are about to insert as we can't trust it after
2625 * the previous insert.
2627 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
2628 new->br_startblock, new->br_blockcount,
2631 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
2632 /* new middle extent - newext */
2633 cur->bc_rec.b.br_state = new->br_state;
2634 if ((error = xfs_btree_insert(cur, &i)))
2636 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2640 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2641 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2642 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
2643 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2644 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2645 case BMAP_LEFT_CONTIG:
2646 case BMAP_RIGHT_CONTIG:
2648 * These cases are all impossible.
2653 /* update reverse mappings */
2654 error = xfs_rmap_convert_extent(mp, dfops, ip, whichfork, new);
2658 /* convert to a btree if necessary */
2659 if (xfs_bmap_needs_btree(ip, whichfork)) {
2660 int tmp_logflags; /* partial log flag return val */
2662 ASSERT(cur == NULL);
2663 error = xfs_bmap_extents_to_btree(tp, ip, first, dfops, &cur,
2664 0, &tmp_logflags, whichfork);
2665 *logflagsp |= tmp_logflags;
2670 /* clear out the allocated field, done with it now in any case. */
2672 cur->bc_private.b.allocated = 0;
2676 xfs_bmap_check_leaf_extents(*curp, ip, whichfork);
2686 * Convert a hole to a delayed allocation.
2689 xfs_bmap_add_extent_hole_delay(
2690 xfs_inode_t *ip, /* incore inode pointer */
2692 xfs_extnum_t *idx, /* extent number to update/insert */
2693 xfs_bmbt_irec_t *new) /* new data to add to file extents */
2695 xfs_ifork_t *ifp; /* inode fork pointer */
2696 xfs_bmbt_irec_t left; /* left neighbor extent entry */
2697 xfs_filblks_t newlen=0; /* new indirect size */
2698 xfs_filblks_t oldlen=0; /* old indirect size */
2699 xfs_bmbt_irec_t right; /* right neighbor extent entry */
2700 int state; /* state bits, accessed thru macros */
2701 xfs_filblks_t temp=0; /* temp for indirect calculations */
2703 ifp = XFS_IFORK_PTR(ip, whichfork);
2705 if (whichfork == XFS_COW_FORK)
2706 state |= BMAP_COWFORK;
2707 ASSERT(isnullstartblock(new->br_startblock));
2710 * Check and set flags if this segment has a left neighbor
2713 state |= BMAP_LEFT_VALID;
2714 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &left);
2716 if (isnullstartblock(left.br_startblock))
2717 state |= BMAP_LEFT_DELAY;
2721 * Check and set flags if the current (right) segment exists.
2722 * If it doesn't exist, we're converting the hole at end-of-file.
2724 if (*idx < xfs_iext_count(ifp)) {
2725 state |= BMAP_RIGHT_VALID;
2726 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx), &right);
2728 if (isnullstartblock(right.br_startblock))
2729 state |= BMAP_RIGHT_DELAY;
2733 * Set contiguity flags on the left and right neighbors.
2734 * Don't let extents get too large, even if the pieces are contiguous.
2736 if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) &&
2737 left.br_startoff + left.br_blockcount == new->br_startoff &&
2738 left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
2739 state |= BMAP_LEFT_CONTIG;
2741 if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) &&
2742 new->br_startoff + new->br_blockcount == right.br_startoff &&
2743 new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
2744 (!(state & BMAP_LEFT_CONTIG) ||
2745 (left.br_blockcount + new->br_blockcount +
2746 right.br_blockcount <= MAXEXTLEN)))
2747 state |= BMAP_RIGHT_CONTIG;
2750 * Switch out based on the contiguity flags.
2752 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
2753 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2755 * New allocation is contiguous with delayed allocations
2756 * on the left and on the right.
2757 * Merge all three into a single extent record.
2760 temp = left.br_blockcount + new->br_blockcount +
2761 right.br_blockcount;
2763 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2764 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp);
2765 oldlen = startblockval(left.br_startblock) +
2766 startblockval(new->br_startblock) +
2767 startblockval(right.br_startblock);
2768 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2770 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx),
2771 nullstartblock((int)newlen));
2772 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2774 xfs_iext_remove(ip, *idx + 1, 1, state);
2777 case BMAP_LEFT_CONTIG:
2779 * New allocation is contiguous with a delayed allocation
2780 * on the left.
2781 * Merge the new allocation with the left neighbor.
2784 temp = left.br_blockcount + new->br_blockcount;
2786 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2787 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp);
2788 oldlen = startblockval(left.br_startblock) +
2789 startblockval(new->br_startblock);
2790 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2792 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx),
2793 nullstartblock((int)newlen));
2794 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2797 case BMAP_RIGHT_CONTIG:
2799 * New allocation is contiguous with a delayed allocation
2800 * on the right.
2801 * Merge the new allocation with the right neighbor.
2803 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2804 temp = new->br_blockcount + right.br_blockcount;
2805 oldlen = startblockval(new->br_startblock) +
2806 startblockval(right.br_startblock);
2807 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2809 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
2811 nullstartblock((int)newlen), temp, right.br_state);
2812 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2817 * New allocation is not contiguous with another
2818 * delayed allocation.
2819 * Insert a new entry.
2821 oldlen = newlen = 0;
2822 xfs_iext_insert(ip, *idx, 1, new, state);
2825 if (oldlen != newlen) {
2826 ASSERT(oldlen > newlen);
2827 xfs_mod_fdblocks(ip->i_mount, (int64_t)(oldlen - newlen),
2830 * Nothing to do for disk quota accounting here.
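/*
 * Editor's note: when delalloc extents merge, the sum of the pieces'
 * worst-case indirect reservations can exceed what the merged extent needs,
 * and the surplus goes back to the free-block counter. A hypothetical sketch
 * of that accounting (merged_worst plays the role of what
 * xfs_bmap_worst_indlen() would return for the combined length):
 */
#include <stdint.h>

static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

/* How many reserved indirect blocks can be returned after a merge. */
static uint64_t indlen_surplus(uint64_t left_res, uint64_t new_res,
			       uint64_t right_res, uint64_t merged_worst)
{
	uint64_t oldlen = left_res + new_res + right_res;
	uint64_t newlen = min_u64(merged_worst, oldlen);  /* never grows */

	return oldlen - newlen;	/* handed back via xfs_mod_fdblocks() */
}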
2836 * Convert a hole to a real allocation.
2838 STATIC int /* error */
2839 xfs_bmap_add_extent_hole_real(
2840 struct xfs_trans *tp,
2841 struct xfs_inode *ip,
2844 struct xfs_btree_cur **curp,
2845 struct xfs_bmbt_irec *new,
2846 xfs_fsblock_t *first,
2847 struct xfs_defer_ops *dfops,
2850 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
2851 struct xfs_mount *mp = ip->i_mount;
2852 struct xfs_btree_cur *cur = *curp;
2853 int error; /* error return value */
2854 int i; /* temp state */
2855 xfs_bmbt_irec_t left; /* left neighbor extent entry */
2856 xfs_bmbt_irec_t right; /* right neighbor extent entry */
2857 int rval=0; /* return value (logging flags) */
2858 int state; /* state bits, accessed thru macros */
2861 ASSERT(*idx <= xfs_iext_count(ifp));
2862 ASSERT(!isnullstartblock(new->br_startblock));
2863 ASSERT(!cur || !(cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));
2865 XFS_STATS_INC(mp, xs_add_exlist);
2868 if (whichfork == XFS_ATTR_FORK)
2869 state |= BMAP_ATTRFORK;
2870 if (whichfork == XFS_COW_FORK)
2871 state |= BMAP_COWFORK;
2874 * Check and set flags if this segment has a left neighbor.
2877 state |= BMAP_LEFT_VALID;
2878 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &left);
2879 if (isnullstartblock(left.br_startblock))
2880 state |= BMAP_LEFT_DELAY;
2884 * Check and set flags if this segment has a current value.
2885 * Not true if we're inserting into the "hole" at eof.
2887 if (*idx < xfs_iext_count(ifp)) {
2888 state |= BMAP_RIGHT_VALID;
2889 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx), &right);
2890 if (isnullstartblock(right.br_startblock))
2891 state |= BMAP_RIGHT_DELAY;
2895 * We're inserting a real allocation between "left" and "right".
2896 * Set the contiguity flags. Don't let extents get too large.
2898 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
2899 left.br_startoff + left.br_blockcount == new->br_startoff &&
2900 left.br_startblock + left.br_blockcount == new->br_startblock &&
2901 left.br_state == new->br_state &&
2902 left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
2903 state |= BMAP_LEFT_CONTIG;
2905 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
2906 new->br_startoff + new->br_blockcount == right.br_startoff &&
2907 new->br_startblock + new->br_blockcount == right.br_startblock &&
2908 new->br_state == right.br_state &&
2909 new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
2910 (!(state & BMAP_LEFT_CONTIG) ||
2911 left.br_blockcount + new->br_blockcount +
2912 right.br_blockcount <= MAXEXTLEN))
2913 state |= BMAP_RIGHT_CONTIG;
2917 * Select which case we're in here, and implement it.
2919 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
2920 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2922 * New allocation is contiguous with real allocations on the
2923 * left and on the right.
2924 * Merge all three into a single extent record.
2927 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2928 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
2929 left.br_blockcount + new->br_blockcount +
2930 right.br_blockcount);
2931 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2933 xfs_iext_remove(ip, *idx + 1, 1, state);
2935 XFS_IFORK_NEXT_SET(ip, whichfork,
2936 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
2938 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
2940 rval = XFS_ILOG_CORE;
2941 error = xfs_bmbt_lookup_eq(cur, right.br_startoff,
2942 right.br_startblock, right.br_blockcount,
2946 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2947 error = xfs_btree_delete(cur, &i);
2950 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2951 error = xfs_btree_decrement(cur, 0, &i);
2954 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2955 error = xfs_bmbt_update(cur, left.br_startoff,
2957 left.br_blockcount +
2958 new->br_blockcount +
2959 right.br_blockcount,
2966 case BMAP_LEFT_CONTIG:
2968 * New allocation is contiguous with a real allocation
2969 * on the left.
2970 * Merge the new allocation with the left neighbor.
2973 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2974 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
2975 left.br_blockcount + new->br_blockcount);
2976 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2979 rval = xfs_ilog_fext(whichfork);
2982 error = xfs_bmbt_lookup_eq(cur, left.br_startoff,
2983 left.br_startblock, left.br_blockcount,
2987 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2988 error = xfs_bmbt_update(cur, left.br_startoff,
2990 left.br_blockcount +
2998 case BMAP_RIGHT_CONTIG:
3000 * New allocation is contiguous with a real allocation
3001 * on the right.
3002 * Merge the new allocation with the right neighbor.
3004 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
3005 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
3006 new->br_startoff, new->br_startblock,
3007 new->br_blockcount + right.br_blockcount,
3009 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
3012 rval = xfs_ilog_fext(whichfork);
3015 error = xfs_bmbt_lookup_eq(cur,
3017 right.br_startblock,
3018 right.br_blockcount, &i);
3021 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
3022 error = xfs_bmbt_update(cur, new->br_startoff,
3024 new->br_blockcount +
3025 right.br_blockcount,
3034 * New allocation is not contiguous with another
3035 * real allocation.
3036 * Insert a new entry.
3038 xfs_iext_insert(ip, *idx, 1, new, state);
3039 XFS_IFORK_NEXT_SET(ip, whichfork,
3040 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
3042 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
3044 rval = XFS_ILOG_CORE;
3045 error = xfs_bmbt_lookup_eq(cur,
3048 new->br_blockcount, &i);
3051 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
3052 cur->bc_rec.b.br_state = new->br_state;
3053 error = xfs_btree_insert(cur, &i);
3056 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
3061 /* add reverse mapping */
3062 error = xfs_rmap_map_extent(mp, dfops, ip, whichfork, new);
3066 /* convert to a btree if necessary */
3067 if (xfs_bmap_needs_btree(ip, whichfork)) {
3068 int tmp_logflags; /* partial log flag return val */
3070 ASSERT(cur == NULL);
3071 error = xfs_bmap_extents_to_btree(tp, ip, first, dfops, curp,
3072 0, &tmp_logflags, whichfork);
3073 *logflagsp |= tmp_logflags;
3079 /* clear out the allocated field, done with it now in any case. */
3081 cur->bc_private.b.allocated = 0;
3083 xfs_bmap_check_leaf_extents(cur, ip, whichfork);
3090 * Functions used in the extent read, allocate and remove paths
3094 * Adjust the size of the new extent based on di_extsize and rt extsize.
3097 xfs_bmap_extsize_align(
3099 xfs_bmbt_irec_t *gotp, /* next extent pointer */
3100 xfs_bmbt_irec_t *prevp, /* previous extent pointer */
3101 xfs_extlen_t extsz, /* align to this extent size */
3102 int rt, /* is this a realtime inode? */
3103 int eof, /* is extent at end-of-file? */
3104 int delay, /* creating delalloc extent? */
3105 int convert, /* overwriting unwritten extent? */
3106 xfs_fileoff_t *offp, /* in/out: aligned offset */
3107 xfs_extlen_t *lenp) /* in/out: aligned length */
3109 xfs_fileoff_t orig_off; /* original offset */
3110 xfs_extlen_t orig_alen; /* original length */
3111 xfs_fileoff_t orig_end; /* original off+len */
3112 xfs_fileoff_t nexto; /* next file offset */
3113 xfs_fileoff_t prevo; /* previous file offset */
3114 xfs_fileoff_t align_off; /* temp for offset */
3115 xfs_extlen_t align_alen; /* temp for length */
3116 xfs_extlen_t temp; /* temp for calculations */
3121 orig_off = align_off = *offp;
3122 orig_alen = align_alen = *lenp;
3123 orig_end = orig_off + orig_alen;
3126 * If this request overlaps an existing extent, then don't
3127 * attempt to perform any additional alignment.
3129 if (!delay && !eof &&
3130 (orig_off >= gotp->br_startoff) &&
3131 (orig_end <= gotp->br_startoff + gotp->br_blockcount)) {
3136 * If the file offset is unaligned vs. the extent size
3137 * we need to align it. This will be possible unless
3138 * the file was previously written with a kernel that didn't
3139 * perform this alignment, or if a truncate shot us in the
3140 * foot.
3142 temp = do_mod(orig_off, extsz);
3148 /* Same adjustment for the end of the requested area. */
3149 temp = (align_alen % extsz);
3151 align_alen += extsz - temp;
3154 * For large extent hint sizes, the aligned extent might be larger than
3155 * MAXEXTLEN. In that case, reduce the size by an extsz so that it pulls
3156 * the length back under MAXEXTLEN. The outer allocation loops handle
3157 * short allocation just fine, so it is safe to do this. We only want to
3158 * do it when we are forced to, though, because it means more allocation
3159 * operations are required.
3161 while (align_alen > MAXEXTLEN)
3162 align_alen -= extsz;
3163 ASSERT(align_alen <= MAXEXTLEN);
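/*
 * Editor's note: the basic alignment step above is plain modular
 * arithmetic: round the start down to the hint, round the length up, then
 * trim whole hint units until the length is back under the cap. A
 * standalone sketch, assuming extsz > 0 (max_len stands in for MAXEXTLEN):
 */
#include <stdint.h>

static void extsz_align(uint64_t *off, uint64_t *len, uint64_t extsz,
			uint64_t max_len)
{
	uint64_t t;

	t = *off % extsz;		/* misalignment of the start */
	if (t) {
		*len += t;		/* grow the front... */
		*off -= t;		/* ...and pull the start back */
	}
	t = *len % extsz;		/* round the length up */
	if (t)
		*len += extsz - t;
	while (*len > max_len)		/* shorten in whole hint units */
		*len -= extsz;
}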
3166 * If the previous block overlaps with this proposed allocation
3167 * then move the start forward without adjusting the length.
3169 if (prevp->br_startoff != NULLFILEOFF) {
3170 if (prevp->br_startblock == HOLESTARTBLOCK)
3171 prevo = prevp->br_startoff;
3173 prevo = prevp->br_startoff + prevp->br_blockcount;
3176 if (align_off != orig_off && align_off < prevo)
3179 * If the next block overlaps with this proposed allocation
3180 * then move the start back without adjusting the length,
3181 * but not before offset 0.
3182 * This may of course make the start overlap previous block,
3183 * and if we hit the offset 0 limit then the next block
3184 * can still overlap too.
3186 if (!eof && gotp->br_startoff != NULLFILEOFF) {
3187 if ((delay && gotp->br_startblock == HOLESTARTBLOCK) ||
3188 (!delay && gotp->br_startblock == DELAYSTARTBLOCK))
3189 nexto = gotp->br_startoff + gotp->br_blockcount;
3191 nexto = gotp->br_startoff;
3193 nexto = NULLFILEOFF;
3195 align_off + align_alen != orig_end &&
3196 align_off + align_alen > nexto)
3197 align_off = nexto > align_alen ? nexto - align_alen : 0;
3199 * If we're now overlapping the next or previous extent that
3200 * means we can't fit an extsz piece in this hole. Just move
3201 * the start forward to the first valid spot and set
3202 * the length so we hit the end.
3204 if (align_off != orig_off && align_off < prevo)
3206 if (align_off + align_alen != orig_end &&
3207 align_off + align_alen > nexto &&
3208 nexto != NULLFILEOFF) {
3209 ASSERT(nexto > prevo);
3210 align_alen = nexto - align_off;
3214 * If realtime, and the result isn't a multiple of the realtime
3215 * extent size we need to remove blocks until it is.
3217 if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) {
3219 * We're not covering the original request, or
3220 * we won't be able to once we fix the length.
3222 if (orig_off < align_off ||
3223 orig_end > align_off + align_alen ||
3224 align_alen - temp < orig_alen)
3227 * Try to fix it by moving the start up.
3229 if (align_off + temp <= orig_off) {
3234 * Try to fix it by moving the end in.
3236 else if (align_off + align_alen - temp >= orig_end)
3239 * Set the start to the minimum then trim the length.
3242 align_alen -= orig_off - align_off;
3243 align_off = orig_off;
3244 align_alen -= align_alen % mp->m_sb.sb_rextsize;
3247 * Result doesn't cover the request, fail it.
3249 if (orig_off < align_off || orig_end > align_off + align_alen)
3252 ASSERT(orig_off >= align_off);
3253 /* see MAXEXTLEN handling above */
3254 ASSERT(orig_end <= align_off + align_alen ||
3255 align_alen + extsz > MAXEXTLEN);
3259 if (!eof && gotp->br_startoff != NULLFILEOFF)
3260 ASSERT(align_off + align_alen <= gotp->br_startoff);
3261 if (prevp->br_startoff != NULLFILEOFF)
3262 ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount);
3270 #define XFS_ALLOC_GAP_UNITS 4
3274 struct xfs_bmalloca *ap) /* bmap alloc argument struct */
3276 xfs_fsblock_t adjust; /* adjustment to block numbers */
3277 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */
3278 xfs_mount_t *mp; /* mount point structure */
3279 int nullfb; /* true if ap->firstblock isn't set */
3280 int rt; /* true if inode is realtime */
3282 #define ISVALID(x,y) \
3284 (x) < mp->m_sb.sb_rblocks : \
3285 XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \
3286 XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \
3287 XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks)
3289 mp = ap->ip->i_mount;
3290 nullfb = *ap->firstblock == NULLFSBLOCK;
3291 rt = XFS_IS_REALTIME_INODE(ap->ip) &&
3292 xfs_alloc_is_userdata(ap->datatype);
3293 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
3295 * If allocating at eof, and there's a previous real block,
3296 * try to use its last block as our starting point.
3298 if (ap->eof && ap->prev.br_startoff != NULLFILEOFF &&
3299 !isnullstartblock(ap->prev.br_startblock) &&
3300 ISVALID(ap->prev.br_startblock + ap->prev.br_blockcount,
3301 ap->prev.br_startblock)) {
3302 ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount;
3304 * Adjust for the gap between prevp and us.
3306 adjust = ap->offset -
3307 (ap->prev.br_startoff + ap->prev.br_blockcount);
3309 ISVALID(ap->blkno + adjust, ap->prev.br_startblock))
3310 ap->blkno += adjust;
3313 * If not at eof, then compare the two neighbor blocks.
3314 * Figure out whether either one gives us a good starting point,
3315 * and pick the better one.
3317 else if (!ap->eof) {
3318 xfs_fsblock_t gotbno; /* right side block number */
3319 xfs_fsblock_t gotdiff=0; /* right side difference */
3320 xfs_fsblock_t prevbno; /* left side block number */
3321 xfs_fsblock_t prevdiff=0; /* left side difference */
3324 * If there's a previous (left) block, select a requested
3325 * start block based on it.
3327 if (ap->prev.br_startoff != NULLFILEOFF &&
3328 !isnullstartblock(ap->prev.br_startblock) &&
3329 (prevbno = ap->prev.br_startblock +
3330 ap->prev.br_blockcount) &&
3331 ISVALID(prevbno, ap->prev.br_startblock)) {
3333 * Calculate gap to end of previous block.
3335 adjust = prevdiff = ap->offset -
3336 (ap->prev.br_startoff +
3337 ap->prev.br_blockcount);
3339 * Figure the startblock based on the previous block's
3340 * end and the gap size.
3342 * If the gap is large relative to the piece we're
3343 * allocating, or using it gives us an invalid block
3344 * number, then just use the end of the previous block.
3346 if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
3347 ISVALID(prevbno + prevdiff,
3348 ap->prev.br_startblock))
3353 * If the firstblock forbids it, can't use it,
3354 * otherwise must use it.
3356 if (!rt && !nullfb &&
3357 XFS_FSB_TO_AGNO(mp, prevbno) != fb_agno)
3358 prevbno = NULLFSBLOCK;
3361 * No previous block or can't follow it, just default.
3364 prevbno = NULLFSBLOCK;
3366 * If there's a following (right) block, select a requested
3367 * start block based on it.
3369 if (!isnullstartblock(ap->got.br_startblock)) {
3371 * Calculate gap to start of next block.
3373 adjust = gotdiff = ap->got.br_startoff - ap->offset;
3375 * Figure the startblock based on the next block's
3376 * start and the gap size.
3378 gotbno = ap->got.br_startblock;
3381 * If the gap is large relative to the piece we're
3382 * allocating, or using it gives us an invalid block
3383 * number, then just use the start of the next block
3384 * offset by our length.
3386 if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
3387 ISVALID(gotbno - gotdiff, gotbno))
3389 else if (ISVALID(gotbno - ap->length, gotbno)) {
3390 gotbno -= ap->length;
3391 gotdiff += adjust - ap->length;
3395 * If the firstblock forbids it, can't use it,
3396 * otherwise must use it.
3398 if (!rt && !nullfb &&
3399 XFS_FSB_TO_AGNO(mp, gotbno) != fb_agno)
3400 gotbno = NULLFSBLOCK;
3403 * No next block, just default.
3406 gotbno = NULLFSBLOCK;
3408 * If both valid, pick the better one, else the only good
3409 * one, else ap->blkno is already set (to 0 or the inode block).
3411 if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK)
3412 ap->blkno = prevdiff <= gotdiff ? prevbno : gotbno;
3413 else if (prevbno != NULLFSBLOCK)
3414 ap->blkno = prevbno;
3415 else if (gotbno != NULLFSBLOCK)
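/*
 * Editor's note: stripped of the block-validity and AG checks, the neighbor
 * selection above is simply "prefer whichever candidate leaves the smaller
 * gap". A sketch with hypothetical names (NULL_BLOCK mirrors NULLFSBLOCK):
 */
#include <stdint.h>

#define NULL_BLOCK	((uint64_t)-1)

static uint64_t pick_adjacent(uint64_t prevbno, uint64_t prevdiff,
			      uint64_t gotbno, uint64_t gotdiff,
			      uint64_t fallback)
{
	if (prevbno != NULL_BLOCK && gotbno != NULL_BLOCK)
		return prevdiff <= gotdiff ? prevbno : gotbno;
	if (prevbno != NULL_BLOCK)
		return prevbno;
	if (gotbno != NULL_BLOCK)
		return gotbno;
	return fallback;	/* ap->blkno already holds the default */
}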
3422 xfs_bmap_longest_free_extent(
3423 struct xfs_trans *tp,
3428 struct xfs_mount *mp = tp->t_mountp;
3429 struct xfs_perag *pag;
3430 xfs_extlen_t longest;
3433 pag = xfs_perag_get(mp, ag);
3434 if (!pag->pagf_init) {
3435 error = xfs_alloc_pagf_init(mp, tp, ag, XFS_ALLOC_FLAG_TRYLOCK);
3439 if (!pag->pagf_init) {
3445 longest = xfs_alloc_longest_free_extent(mp, pag,
3446 xfs_alloc_min_freelist(mp, pag),
3447 xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE));
3448 if (*blen < longest)
3457 xfs_bmap_select_minlen(
3458 struct xfs_bmalloca *ap,
3459 struct xfs_alloc_arg *args,
3463 if (notinit || *blen < ap->minlen) {
3465 * Since we did a BUF_TRYLOCK above, it is possible that
3466 * there is space for this request.
3468 args->minlen = ap->minlen;
3469 } else if (*blen < args->maxlen) {
3471 * If the best seen length is less than the request length,
3472 * use the best as the minimum.
3474 args->minlen = *blen;
3477 * Otherwise we've seen an extent as big as maxlen, use that
3478 * as the minimum.
3480 args->minlen = args->maxlen;
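/*
 * Editor's note: the fallback ladder above, restated as a pure function
 * (hypothetical names):
 */
#include <stdint.h>

static uint64_t select_minlen(uint64_t want_min, uint64_t want_max,
			      uint64_t best_seen, int notinit)
{
	if (notinit || best_seen < want_min)
		return want_min;	/* trylock may have hidden free space */
	if (best_seen < want_max)
		return best_seen;	/* best length seen becomes the floor */
	return want_max;		/* an extent as big as maxlen exists */
}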
3485 xfs_bmap_btalloc_nullfb(
3486 struct xfs_bmalloca *ap,
3487 struct xfs_alloc_arg *args,
3490 struct xfs_mount *mp = ap->ip->i_mount;
3491 xfs_agnumber_t ag, startag;
3495 args->type = XFS_ALLOCTYPE_START_BNO;
3496 args->total = ap->total;
3498 startag = ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
3499 if (startag == NULLAGNUMBER)
3502 while (*blen < args->maxlen) {
3503 error = xfs_bmap_longest_free_extent(args->tp, ag, blen,
3508 if (++ag == mp->m_sb.sb_agcount)
3514 xfs_bmap_select_minlen(ap, args, blen, notinit);
3519 xfs_bmap_btalloc_filestreams(
3520 struct xfs_bmalloca *ap,
3521 struct xfs_alloc_arg *args,
3524 struct xfs_mount *mp = ap->ip->i_mount;
3529 args->type = XFS_ALLOCTYPE_NEAR_BNO;
3530 args->total = ap->total;
3532 ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
3533 if (ag == NULLAGNUMBER)
3536 error = xfs_bmap_longest_free_extent(args->tp, ag, blen, &notinit);
3540 if (*blen < args->maxlen) {
3541 error = xfs_filestream_new_ag(ap, &ag);
3545 error = xfs_bmap_longest_free_extent(args->tp, ag, blen,
3552 xfs_bmap_select_minlen(ap, args, blen, notinit);
3555 * Set the failure fallback case to look in the selected AG as stream
3556 * may have moved.
3558 ap->blkno = args->fsbno = XFS_AGB_TO_FSB(mp, ag, 0);
3564 struct xfs_bmalloca *ap) /* bmap alloc argument struct */
3566 xfs_mount_t *mp; /* mount point structure */
3567 xfs_alloctype_t atype = 0; /* type for allocation routines */
3568 xfs_extlen_t align = 0; /* minimum allocation alignment */
3569 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */
3571 xfs_alloc_arg_t args;
3573 xfs_extlen_t nextminlen = 0;
3574 int nullfb; /* true if ap->firstblock isn't set */
3582 mp = ap->ip->i_mount;
3584 /* stripe alignment for allocation is determined by mount parameters */
3586 if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
3587 stripe_align = mp->m_swidth;
3588 else if (mp->m_dalign)
3589 stripe_align = mp->m_dalign;
3591 if (ap->flags & XFS_BMAPI_COWFORK)
3592 align = xfs_get_cowextsz_hint(ap->ip);
3593 else if (xfs_alloc_is_userdata(ap->datatype))
3594 align = xfs_get_extsz_hint(ap->ip);
3596 error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
3597 align, 0, ap->eof, 0, ap->conv,
3598 &ap->offset, &ap->length);
3604 nullfb = *ap->firstblock == NULLFSBLOCK;
3605 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
3607 if (xfs_alloc_is_userdata(ap->datatype) &&
3608 xfs_inode_is_filestream(ap->ip)) {
3609 ag = xfs_filestream_lookup_ag(ap->ip);
3610 ag = (ag != NULLAGNUMBER) ? ag : 0;
3611 ap->blkno = XFS_AGB_TO_FSB(mp, ag, 0);
3613 ap->blkno = XFS_INO_TO_FSB(mp, ap->ip->i_ino);
3616 ap->blkno = *ap->firstblock;
3618 xfs_bmap_adjacent(ap);
3621 * If allowed, use ap->blkno; otherwise must use firstblock since
3622 * it's in the right allocation group.
3624 if (nullfb || XFS_FSB_TO_AGNO(mp, ap->blkno) == fb_agno)
3627 ap->blkno = *ap->firstblock;
3629 * Normal allocation, done through xfs_alloc_vextent.
3631 tryagain = isaligned = 0;
3632 memset(&args, 0, sizeof(args));
3635 args.fsbno = ap->blkno;
3636 xfs_rmap_skip_owner_update(&args.oinfo);
3638 /* Trim the allocation back to the maximum an AG can fit. */
3639 args.maxlen = MIN(ap->length, mp->m_ag_max_usable);
3640 args.firstblock = *ap->firstblock;
3644 * Search for an allocation group with a single extent large
3645 * enough for the request. If one isn't found, then adjust
3646 * the minimum allocation size to the largest space found.
3648 if (xfs_alloc_is_userdata(ap->datatype) &&
3649 xfs_inode_is_filestream(ap->ip))
3650 error = xfs_bmap_btalloc_filestreams(ap, &args, &blen);
3652 error = xfs_bmap_btalloc_nullfb(ap, &args, &blen);
3655 } else if (ap->dfops->dop_low) {
3656 if (xfs_inode_is_filestream(ap->ip))
3657 args.type = XFS_ALLOCTYPE_FIRST_AG;
3659 args.type = XFS_ALLOCTYPE_START_BNO;
3660 args.total = args.minlen = ap->minlen;
3662 args.type = XFS_ALLOCTYPE_NEAR_BNO;
3663 args.total = ap->total;
3664 args.minlen = ap->minlen;
3666 /* apply extent size hints if obtained earlier */
3669 if ((args.mod = (xfs_extlen_t)do_mod(ap->offset, args.prod)))
3670 args.mod = (xfs_extlen_t)(args.prod - args.mod);
3671 } else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) {
3675 args.prod = PAGE_SIZE >> mp->m_sb.sb_blocklog;
3676 if ((args.mod = (xfs_extlen_t)(do_mod(ap->offset, args.prod))))
3677 args.mod = (xfs_extlen_t)(args.prod - args.mod);
3680 * If we are not low on available data blocks, and the
3681 * underlying logical volume manager is a stripe, and
3682 * the file offset is zero then try to allocate data
3683 * blocks on stripe unit boundary.
3684 * NOTE: ap->aeof is only set if the allocation length
3685 * is >= the stripe unit and the allocation offset is
3686 * at the end of file.
3688 if (!ap->dfops->dop_low && ap->aeof) {
3690 args.alignment = stripe_align;
3694 * Adjust for alignment
3696 if (blen > args.alignment && blen <= args.maxlen)
3697 args.minlen = blen - args.alignment;
3698 args.minalignslop = 0;
3701 * First try an exact bno allocation.
3702 * If it fails then do a near or start bno
3703 * allocation with alignment turned on.
3707 args.type = XFS_ALLOCTYPE_THIS_BNO;
3710 * Compute the minlen+alignment for the
3711 * next case. Set slop so that the value
3712 * of minlen+alignment+slop doesn't go up
3713 * between the calls.
3715 if (blen > stripe_align && blen <= args.maxlen)
3716 nextminlen = blen - stripe_align;
3718 nextminlen = args.minlen;
3719 if (nextminlen + stripe_align > args.minlen + 1)
3721 nextminlen + stripe_align -
3724 args.minalignslop = 0;
3728 args.minalignslop = 0;
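/*
 * Editor's note: a worked sketch of the slop computation above. The goal is
 * that minlen + alignment + slop stays constant between the exact-bno
 * attempt and the aligned retry, so the second attempt never asks the
 * allocator for more total space than the first (names are illustrative):
 */
#include <stdint.h>

static uint64_t minalignslop(uint64_t minlen, uint64_t nextminlen,
			     uint64_t stripe_align)
{
	if (nextminlen + stripe_align > minlen + 1)
		return nextminlen + stripe_align - minlen - 1;
	return 0;
}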
3730 args.minleft = ap->minleft;
3731 args.wasdel = ap->wasdel;
3732 args.resv = XFS_AG_RESV_NONE;
3733 args.datatype = ap->datatype;
3734 if (ap->datatype & XFS_ALLOC_USERDATA_ZERO)
3737 error = xfs_alloc_vextent(&args);
3741 if (tryagain && args.fsbno == NULLFSBLOCK) {
3743 * Exact allocation failed. Now try with alignment
3744 * turned on.
3747 args.fsbno = ap->blkno;
3748 args.alignment = stripe_align;
3749 args.minlen = nextminlen;
3750 args.minalignslop = 0;
3752 if ((error = xfs_alloc_vextent(&args)))
3755 if (isaligned && args.fsbno == NULLFSBLOCK) {
3757 * allocation failed, so turn off alignment and
3758 * try again.
3761 args.fsbno = ap->blkno;
3763 if ((error = xfs_alloc_vextent(&args)))
3766 if (args.fsbno == NULLFSBLOCK && nullfb &&
3767 args.minlen > ap->minlen) {
3768 args.minlen = ap->minlen;
3769 args.type = XFS_ALLOCTYPE_START_BNO;
3770 args.fsbno = ap->blkno;
3771 if ((error = xfs_alloc_vextent(&args)))
3774 if (args.fsbno == NULLFSBLOCK && nullfb) {
3776 args.type = XFS_ALLOCTYPE_FIRST_AG;
3777 args.total = ap->minlen;
3778 if ((error = xfs_alloc_vextent(&args)))
3780 ap->dfops->dop_low = true;
3782 if (args.fsbno != NULLFSBLOCK) {
3784 * check the allocation happened at the same or higher AG than
3785 * the first block that was allocated.
3787 ASSERT(*ap->firstblock == NULLFSBLOCK ||
3788 XFS_FSB_TO_AGNO(mp, *ap->firstblock) <=
3789 XFS_FSB_TO_AGNO(mp, args.fsbno));
3791 ap->blkno = args.fsbno;
3792 if (*ap->firstblock == NULLFSBLOCK)
3793 *ap->firstblock = args.fsbno;
3794 ASSERT(nullfb || fb_agno <= args.agno);
3795 ap->length = args.len;
3796 if (!(ap->flags & XFS_BMAPI_COWFORK))
3797 ap->ip->i_d.di_nblocks += args.len;
3798 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
3800 ap->ip->i_delayed_blks -= args.len;
3802 * Adjust the disk quota also. This was reserved
3803 * earlier.
3805 xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
3806 ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT :
3807 XFS_TRANS_DQ_BCOUNT,
3810 ap->blkno = NULLFSBLOCK;
3817 * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
3818 * It figures out where to ask the underlying allocator to put the new extent.
3822 struct xfs_bmalloca *ap) /* bmap alloc argument struct */
3824 if (XFS_IS_REALTIME_INODE(ap->ip) &&
3825 xfs_alloc_is_userdata(ap->datatype))
3826 return xfs_bmap_rtalloc(ap);
3827 return xfs_bmap_btalloc(ap);
3830 /* Trim extent to fit a logical block range. */
3833 struct xfs_bmbt_irec *irec,
3837 xfs_fileoff_t distance;
3838 xfs_fileoff_t end = bno + len;
3840 if (irec->br_startoff + irec->br_blockcount <= bno ||
3841 irec->br_startoff >= end) {
3842 irec->br_blockcount = 0;
3846 if (irec->br_startoff < bno) {
3847 distance = bno - irec->br_startoff;
3848 if (isnullstartblock(irec->br_startblock))
3849 irec->br_startblock = DELAYSTARTBLOCK;
3850 if (irec->br_startblock != DELAYSTARTBLOCK &&
3851 irec->br_startblock != HOLESTARTBLOCK)
3852 irec->br_startblock += distance;
3853 irec->br_startoff += distance;
3854 irec->br_blockcount -= distance;
3857 if (end < irec->br_startoff + irec->br_blockcount) {
3858 distance = irec->br_startoff + irec->br_blockcount - end;
3859 irec->br_blockcount -= distance;
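/*
 * Editor's note: a quick userspace exercise of the trim semantics above:
 * clamp a record to [bno, bno + len) and zero its length if there is no
 * overlap. Hypothetical types; it mirrors the behaviour minus the kernel's
 * hole/delalloc sentinel handling.
 */
#include <stdint.h>
#include <stdio.h>

struct rec { uint64_t off, bno, len; };

static void trim(struct rec *r, uint64_t bno, uint64_t len)
{
	uint64_t end = bno + len, d;

	if (r->off + r->len <= bno || r->off >= end) {
		r->len = 0;				/* no overlap */
		return;
	}
	if (r->off < bno) {				/* clip the front */
		d = bno - r->off;
		r->bno += d;
		r->off += d;
		r->len -= d;
	}
	if (end < r->off + r->len)			/* clip the tail */
		r->len -= r->off + r->len - end;
}

int main(void)
{
	struct rec r = { .off = 10, .bno = 100, .len = 20 };

	trim(&r, 15, 10);		/* keep file blocks [15, 25) */
	printf("off=%llu bno=%llu len=%llu\n",	/* off=15 bno=105 len=10 */
	       (unsigned long long)r.off, (unsigned long long)r.bno,
	       (unsigned long long)r.len);
	return 0;
}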
3864 * Trim the returned map to the required bounds
3868 struct xfs_bmbt_irec *mval,
3869 struct xfs_bmbt_irec *got,
3877 if ((flags & XFS_BMAPI_ENTIRE) ||
3878 got->br_startoff + got->br_blockcount <= obno) {
3880 if (isnullstartblock(got->br_startblock))
3881 mval->br_startblock = DELAYSTARTBLOCK;
3887 ASSERT((*bno >= obno) || (n == 0));
3889 mval->br_startoff = *bno;
3890 if (isnullstartblock(got->br_startblock))
3891 mval->br_startblock = DELAYSTARTBLOCK;
3893 mval->br_startblock = got->br_startblock +
3894 (*bno - got->br_startoff);
3896 * Return the minimum of what we got and what we asked for, for
3897 * the length. We can use the len variable here because it is
3898 * modified below and we could have been there before coming
3899 * here if the first part of the allocation didn't overlap what
3900 * was asked for.
3902 mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno,
3903 got->br_blockcount - (*bno - got->br_startoff));
3904 mval->br_state = got->br_state;
3905 ASSERT(mval->br_blockcount <= len);
3910 * Update and validate the extent map to return
3913 xfs_bmapi_update_map(
3914 struct xfs_bmbt_irec **map,
3922 xfs_bmbt_irec_t *mval = *map;
3924 ASSERT((flags & XFS_BMAPI_ENTIRE) ||
3925 ((mval->br_startoff + mval->br_blockcount) <= end));
3926 ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) ||
3927 (mval->br_startoff < obno));
3929 *bno = mval->br_startoff + mval->br_blockcount;
3931 if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) {
3932 /* update previous map with new information */
3933 ASSERT(mval->br_startblock == mval[-1].br_startblock);
3934 ASSERT(mval->br_blockcount > mval[-1].br_blockcount);
3935 ASSERT(mval->br_state == mval[-1].br_state);
3936 mval[-1].br_blockcount = mval->br_blockcount;
3937 mval[-1].br_state = mval->br_state;
3938 } else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK &&
3939 mval[-1].br_startblock != DELAYSTARTBLOCK &&
3940 mval[-1].br_startblock != HOLESTARTBLOCK &&
3941 mval->br_startblock == mval[-1].br_startblock +
3942 mval[-1].br_blockcount &&
3943 ((flags & XFS_BMAPI_IGSTATE) ||
3944 mval[-1].br_state == mval->br_state)) {
3945 ASSERT(mval->br_startoff ==
3946 mval[-1].br_startoff + mval[-1].br_blockcount);
3947 mval[-1].br_blockcount += mval->br_blockcount;
3948 } else if (*n > 0 &&
3949 mval->br_startblock == DELAYSTARTBLOCK &&
3950 mval[-1].br_startblock == DELAYSTARTBLOCK &&
3951 mval->br_startoff ==
3952 mval[-1].br_startoff + mval[-1].br_blockcount) {
3953 mval[-1].br_blockcount += mval->br_blockcount;
3954 mval[-1].br_state = mval->br_state;
3955 } else if (!((*n == 0) &&
3956 ((mval->br_startoff + mval->br_blockcount) <=
3965 * Map file blocks to filesystem blocks without allocation.
3969 struct xfs_inode *ip,
3972 struct xfs_bmbt_irec *mval,
3976 struct xfs_mount *mp = ip->i_mount;
3977 struct xfs_ifork *ifp;
3978 struct xfs_bmbt_irec got;
3985 int whichfork = xfs_bmapi_whichfork(flags);
3988 ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK|XFS_BMAPI_ENTIRE|
3989 XFS_BMAPI_IGSTATE|XFS_BMAPI_COWFORK)));
3990 ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED|XFS_ILOCK_EXCL));
3992 if (unlikely(XFS_TEST_ERROR(
3993 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
3994 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
3995 mp, XFS_ERRTAG_BMAPIFORMAT))) {
3996 XFS_ERROR_REPORT("xfs_bmapi_read", XFS_ERRLEVEL_LOW, mp);
3997 return -EFSCORRUPTED;
4000 if (XFS_FORCED_SHUTDOWN(mp))
4003 XFS_STATS_INC(mp, xs_blk_mapr);
4005 ifp = XFS_IFORK_PTR(ip, whichfork);
4007 /* No CoW fork? Return a hole. */
4008 if (whichfork == XFS_COW_FORK && !ifp) {
4009 mval->br_startoff = bno;
4010 mval->br_startblock = HOLESTARTBLOCK;
4011 mval->br_blockcount = len;
4012 mval->br_state = XFS_EXT_NORM;
4017 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
4018 error = xfs_iread_extents(NULL, ip, whichfork);
4023 if (!xfs_iext_lookup_extent(ip, ifp, bno, &idx, &got))
4028 while (bno < end && n < *nmap) {
4029 /* Reading past eof, act as though there's a hole up to end. */
4031 got.br_startoff = end;
4032 if (got.br_startoff > bno) {
4033 /* Reading in a hole. */
4034 mval->br_startoff = bno;
4035 mval->br_startblock = HOLESTARTBLOCK;
4036 mval->br_blockcount =
4037 XFS_FILBLKS_MIN(len, got.br_startoff - bno);
4038 mval->br_state = XFS_EXT_NORM;
4039 bno += mval->br_blockcount;
4040 len -= mval->br_blockcount;
4046 /* set up the extent map to return. */
4047 xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags);
4048 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
4050 /* If we're done, stop now. */
4051 if (bno >= end || n >= *nmap)
4054 /* Else go on to the next record. */
4055 if (!xfs_iext_get_extent(ifp, ++idx, &got))
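/*
 * Editor's note: a compact standalone model of the read loop above: walk a
 * sorted extent list and synthesize hole records for the gaps. Hypothetical
 * types; HOLE plays the role of HOLESTARTBLOCK.
 */
#include <stddef.h>
#include <stdint.h>

#define HOLE ((uint64_t)-1)

struct map { uint64_t off, bno, len; };

/* Fill out[] covering [bno, bno + len) from the sorted extents ext[0..n). */
static size_t map_range(const struct map *ext, size_t n,
			uint64_t bno, uint64_t len,
			struct map *out, size_t nout)
{
	uint64_t end = bno + len;
	size_t i = 0, o = 0;

	while (i < n && ext[i].off + ext[i].len <= bno)
		i++;				/* skip extents before range */

	while (bno < end && o < nout) {
		uint64_t gstart = i < n ? ext[i].off : end;

		if (gstart > bno) {		/* in a hole */
			out[o].off = bno;
			out[o].bno = HOLE;
			out[o].len = (gstart < end ? gstart : end) - bno;
		} else {			/* inside ext[i] */
			uint64_t gend = ext[i].off + ext[i].len;

			out[o].off = bno;
			out[o].bno = ext[i].bno + (bno - ext[i].off);
			out[o].len = (gend < end ? gend : end) - bno;
			i++;
		}
		bno += out[o].len;
		o++;
	}
	return o;
}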
4063 * Add a delayed allocation extent to an inode. Blocks are reserved from the
4064 * global pool and the extent inserted into the inode in-core extent tree.
4066 * On entry, got refers to the first extent beyond the offset of the extent to
4067 * allocate or eof is specified if no such extent exists. On return, got refers
4068 * to the extent record that was inserted to the inode fork.
4070 * Note that the allocated extent may have been merged with contiguous extents
4071 * during insertion into the inode fork. Thus, got does not reflect the current
4072 * state of the inode fork on return. If necessary, the caller can use lastx to
4073 * look up the updated record in the inode fork.
4076 xfs_bmapi_reserve_delalloc(
4077 struct xfs_inode *ip,
4081 xfs_filblks_t prealloc,
4082 struct xfs_bmbt_irec *got,
4083 xfs_extnum_t *lastx,
4086 struct xfs_mount *mp = ip->i_mount;
4087 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
4089 xfs_extlen_t indlen;
4090 char rt = XFS_IS_REALTIME_INODE(ip);
4093 xfs_fileoff_t aoff = off;
4096 * Cap the alloc length. Keep track of prealloc so we know whether to
4097 * tag the inode before we return.
4099 alen = XFS_FILBLKS_MIN(len + prealloc, MAXEXTLEN);
4101 alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff);
4102 if (prealloc && alen >= len)
4103 prealloc = alen - len;
4105 /* Figure out the extent size, adjust alen */
4106 if (whichfork == XFS_COW_FORK)
4107 extsz = xfs_get_cowextsz_hint(ip);
4109 extsz = xfs_get_extsz_hint(ip);
4111 struct xfs_bmbt_irec prev;
4113 if (!xfs_iext_get_extent(ifp, *lastx - 1, &prev))
4114 prev.br_startoff = NULLFILEOFF;
4116 error = xfs_bmap_extsize_align(mp, got, &prev, extsz, rt, eof,
4117 1, 0, &aoff, &alen);
4122 extsz = alen / mp->m_sb.sb_rextsize;
4125 * Make a transaction-less quota reservation for delayed allocation
4126 * blocks. This number gets adjusted later. If the reservation fails,
4127 * we return before any blocks have been allocated.
4129 error = xfs_trans_reserve_quota_nblks(NULL, ip, (long)alen, 0,
4130 rt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
4135 * Split changing sb for alen and indlen since they could be coming
4136 * from different places.
4138 indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen);
4142 error = xfs_mod_frextents(mp, -((int64_t)extsz));
4144 error = xfs_mod_fdblocks(mp, -((int64_t)alen), false);
4148 goto out_unreserve_quota;
4150 error = xfs_mod_fdblocks(mp, -((int64_t)indlen), false);
4152 goto out_unreserve_blocks;
4155 ip->i_delayed_blks += alen;
4157 got->br_startoff = aoff;
4158 got->br_startblock = nullstartblock(indlen);
4159 got->br_blockcount = alen;
4160 got->br_state = XFS_EXT_NORM;
4162 xfs_bmap_add_extent_hole_delay(ip, whichfork, lastx, got);
4165 * Tag the inode if blocks were preallocated. Note that COW fork
4166 * preallocation can occur at the start or end of the extent, even when
4167 * prealloc == 0, so we must also check the aligned offset and length.
4169 if (whichfork == XFS_DATA_FORK && prealloc)
4170 xfs_inode_set_eofblocks_tag(ip);
4171 if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len))
4172 xfs_inode_set_cowblocks_tag(ip);
4176 out_unreserve_blocks:
4178 xfs_mod_frextents(mp, extsz);
4180 xfs_mod_fdblocks(mp, alen, false);
4181 out_unreserve_quota:
4182 if (XFS_IS_QUOTA_ON(mp))
4183 xfs_trans_unreserve_quota_nblks(NULL, ip, (long)alen, 0, rt ?
4184 XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
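/*
 * Editor's note: the reservation sequence above is a classic
 * acquire-in-order / release-in-reverse pattern. A skeletal sketch of the
 * same shape; the reserve_*()/unreserve_*() helpers are placeholders, not
 * real XFS calls:
 */
static int reserve_quota(long alen) { (void)alen; return 0; }
static int reserve_blocks(long alen) { (void)alen; return 0; }
static int reserve_indlen(long indlen) { (void)indlen; return 0; }
static void unreserve_quota(long alen) { (void)alen; }
static void unreserve_blocks(long alen) { (void)alen; }

static int reserve_delalloc(long alen, long indlen)
{
	int error;

	error = reserve_quota(alen);		/* quota first, no unwind */
	if (error)
		return error;
	error = reserve_blocks(alen);		/* then the data blocks */
	if (error)
		goto out_unreserve_quota;
	error = reserve_indlen(indlen);		/* then the indirect blocks */
	if (error)
		goto out_unreserve_blocks;
	return 0;

out_unreserve_blocks:
	unreserve_blocks(alen);
out_unreserve_quota:
	unreserve_quota(alen);
	return error;
}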
4190 struct xfs_bmalloca *bma)
4192 struct xfs_mount *mp = bma->ip->i_mount;
4193 int whichfork = xfs_bmapi_whichfork(bma->flags);
4194 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork);
4195 int tmp_logflags = 0;
4198 ASSERT(bma->length > 0);
4201 * For the wasdelay case, we could also just allocate the stuff asked
4202 * for in this bmap call but that wouldn't be as good.
4205 bma->length = (xfs_extlen_t)bma->got.br_blockcount;
4206 bma->offset = bma->got.br_startoff;
4208 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1),
4212 bma->length = XFS_FILBLKS_MIN(bma->length, MAXEXTLEN);
4214 bma->length = XFS_FILBLKS_MIN(bma->length,
4215 bma->got.br_startoff - bma->offset);
4219 * Set the data type being allocated. For the data fork, the first data
4220 * in the file is treated differently to all other allocations. For the
4221 * attribute fork, we only need to ensure the allocated range is not on
4222 * busy blocks.
4224 if (!(bma->flags & XFS_BMAPI_METADATA)) {
4225 bma->datatype = XFS_ALLOC_NOBUSY;
4226 if (whichfork == XFS_DATA_FORK) {
4227 if (bma->offset == 0)
4228 bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA;
4230 bma->datatype |= XFS_ALLOC_USERDATA;
4232 if (bma->flags & XFS_BMAPI_ZERO)
4233 bma->datatype |= XFS_ALLOC_USERDATA_ZERO;
4236 bma->minlen = (bma->flags & XFS_BMAPI_CONTIG) ? bma->length : 1;
4239 * Only want to do the alignment at the eof if it is userdata and
4240 * allocation length is larger than a stripe unit.
4242 if (mp->m_dalign && bma->length >= mp->m_dalign &&
4243 !(bma->flags & XFS_BMAPI_METADATA) && whichfork == XFS_DATA_FORK) {
4244 error = xfs_bmap_isaeof(bma, whichfork);
4249 error = xfs_bmap_alloc(bma);
4254 bma->cur->bc_private.b.firstblock = *bma->firstblock;
4255 if (bma->blkno == NULLFSBLOCK)
4257 if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) {
4258 bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork);
4259 bma->cur->bc_private.b.firstblock = *bma->firstblock;
4260 bma->cur->bc_private.b.dfops = bma->dfops;
4263 * Bump the number of extents we've allocated
4264 * in this call.
4269 bma->cur->bc_private.b.flags =
4270 bma->wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
4272 bma->got.br_startoff = bma->offset;
4273 bma->got.br_startblock = bma->blkno;
4274 bma->got.br_blockcount = bma->length;
4275 bma->got.br_state = XFS_EXT_NORM;
4278 * In the data fork, a wasdelay extent has been initialized, so
4279 * shouldn't be flagged as unwritten.
4281 * For the cow fork, however, we convert delalloc reservations
4282 * (extents allocated for speculative preallocation) to
4283 * allocated unwritten extents, and only convert the unwritten
4284 * extents to real extents when we're about to write the data.
4286 if ((!bma->wasdel || (bma->flags & XFS_BMAPI_COWFORK)) &&
4287 (bma->flags & XFS_BMAPI_PREALLOC) &&
4288 xfs_sb_version_hasextflgbit(&mp->m_sb))
4289 bma->got.br_state = XFS_EXT_UNWRITTEN;
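/*
 * Illustrative summary of the cases above: a data fork wasdelay conversion
 * keeps XFS_EXT_NORM even when XFS_BMAPI_PREALLOC is set, while a COW fork
 * allocation with XFS_BMAPI_PREALLOC (given xfs_sb_version_hasextflgbit())
 * comes back as XFS_EXT_UNWRITTEN and is only converted to a written extent
 * when the data is about to be written.
 */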
4292 error = xfs_bmap_add_extent_delay_real(bma, whichfork);
4294 error = xfs_bmap_add_extent_hole_real(bma->tp, bma->ip,
4295 whichfork, &bma->idx, &bma->cur, &bma->got,
4296 bma->firstblock, bma->dfops, &bma->logflags);
4298 bma->logflags |= tmp_logflags;
4303 * Update our extent pointer, given that xfs_bmap_add_extent_delay_real
4304 * or xfs_bmap_add_extent_hole_real might have merged it into one of
4305 * the neighbouring ones.
4307 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &bma->got);
4309 ASSERT(bma->got.br_startoff <= bma->offset);
4310 ASSERT(bma->got.br_startoff + bma->got.br_blockcount >=
4311 bma->offset + bma->length);
4312 ASSERT(bma->got.br_state == XFS_EXT_NORM ||
4313 bma->got.br_state == XFS_EXT_UNWRITTEN);
4318 xfs_bmapi_convert_unwritten(
4319 struct xfs_bmalloca *bma,
4320 struct xfs_bmbt_irec *mval,
4324 int whichfork = xfs_bmapi_whichfork(flags);
4325 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork);
4326 int tmp_logflags = 0;
4329 /* check if we need to do unwritten->real conversion */
4330 if (mval->br_state == XFS_EXT_UNWRITTEN &&
4331 (flags & XFS_BMAPI_PREALLOC))
4334 /* check if we need to do real->unwritten conversion */
4335 if (mval->br_state == XFS_EXT_NORM &&
4336 (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) !=
4337 (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT))
4341 * Modify (by adding) the state flag, if writing.
4343 ASSERT(mval->br_blockcount <= len);
4344 if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) {
4345 bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp,
4346 bma->ip, whichfork);
4347 bma->cur->bc_private.b.firstblock = *bma->firstblock;
4348 bma->cur->bc_private.b.dfops = bma->dfops;
4350 mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN)
4351 ? XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
4354 * Before insertion into the bmbt, zero the range being converted if required.
4357 if (flags & XFS_BMAPI_ZERO) {
4358 error = xfs_zero_extent(bma->ip, mval->br_startblock,
4359 mval->br_blockcount);
4364 error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, whichfork,
4365 &bma->idx, &bma->cur, mval, bma->firstblock, bma->dfops,
4368 * Log the inode core unconditionally in the unwritten extent conversion
4369 * path because the conversion might not have done so (e.g., if the
4370 * extent count hasn't changed). We need to make sure the inode is dirty
4371 * in the transaction for the sake of fsync(), even if nothing has
4372 * changed, because fsync() will not force the log for this transaction
4373 * unless it sees the inode pinned.
4375 * Note: If we're only converting cow fork extents, there aren't
4376 * any on-disk updates to make, so we don't need to log anything.
4378 if (whichfork != XFS_COW_FORK)
4379 bma->logflags |= tmp_logflags | XFS_ILOG_CORE;
4384 * Update our extent pointer, given that
4385 * xfs_bmap_add_extent_unwritten_real might have merged it into one
4386 * of the neighbouring ones.
4388 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &bma->got);
4391 * We may have combined previously unwritten space with written space,
4392 * so generate another request.
4394 if (mval->br_blockcount < len)
4400 * Map file blocks to filesystem blocks, and allocate blocks or convert the
4401 * extent state if necessary. Detailed behaviour is controlled by the flags
4402 * parameter. Only allocates blocks from a single allocation group, to avoid locking problems.
4405 * The returned value in "firstblock" from the first call in a transaction
4406 * must be remembered and presented to subsequent calls in "firstblock".
4407 * An upper bound for the number of blocks to be allocated is supplied to
4408 * the first call in "total"; if no allocation group has that many free
4409 * blocks then the call will fail (return NULLFSBLOCK in "firstblock").
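/*
 * Sketch of the caller-side contract (illustrative only; offset_fsb,
 * count_fsb and resblks stand in for caller-supplied values):
 *
 *	xfs_fsblock_t		firstblock = NULLFSBLOCK;
 *	struct xfs_defer_ops	dfops;
 *	struct xfs_bmbt_irec	map;
 *	int			nmap = 1;
 *
 *	xfs_defer_init(&dfops, &firstblock);
 *	error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb, 0,
 *			&firstblock, resblks, &map, &nmap, &dfops);
 *
 * Any further xfs_bmapi_write() calls in the same transaction must be
 * handed the same firstblock so that all allocations come from the same
 * allocation group.
 */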
4413 struct xfs_trans *tp, /* transaction pointer */
4414 struct xfs_inode *ip, /* incore inode */
4415 xfs_fileoff_t bno, /* starting file offs. mapped */
4416 xfs_filblks_t len, /* length to map in file */
4417 int flags, /* XFS_BMAPI_... */
4418 xfs_fsblock_t *firstblock, /* first allocated block
4419 controls a.g. for allocs */
4420 xfs_extlen_t total, /* total blocks needed */
4421 struct xfs_bmbt_irec *mval, /* output: map values */
4422 int *nmap, /* i/o: mval size/count */
4423 struct xfs_defer_ops *dfops) /* i/o: list extents to free */
4425 struct xfs_mount *mp = ip->i_mount;
4426 struct xfs_ifork *ifp;
4427 struct xfs_bmalloca bma = { NULL }; /* args for xfs_bmap_alloc */
4428 xfs_fileoff_t end; /* end of mapped file region */
4429 bool eof = false; /* after the end of extents */
4430 int error; /* error return */
4431 int n; /* current extent index */
4432 xfs_fileoff_t obno; /* old block number (offset) */
4433 int whichfork; /* data or attr fork */
4436 xfs_fileoff_t orig_bno; /* original block number value */
4437 int orig_flags; /* original flags arg value */
4438 xfs_filblks_t orig_len; /* original value of len arg */
4439 struct xfs_bmbt_irec *orig_mval; /* original value of mval */
4440 int orig_nmap; /* original value of *nmap */
4448 whichfork = xfs_bmapi_whichfork(flags);
4451 ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
4452 ASSERT(!(flags & XFS_BMAPI_IGSTATE));
4453 ASSERT(tp != NULL ||
4454 (flags & (XFS_BMAPI_CONVERT | XFS_BMAPI_COWFORK)) ==
4455 (XFS_BMAPI_CONVERT | XFS_BMAPI_COWFORK));
4457 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL);
4458 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
4459 ASSERT(!(flags & XFS_BMAPI_REMAP));
4461 /* zeroing is currently only for data extents, not metadata */
4462 ASSERT((flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)) !=
4463 (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO));
4465 * we can allocate unwritten extents or pre-zero allocated blocks,
4466 * but it makes no sense to do both at once. This would result in
4467 * zeroing the unwritten extent twice, while still leaving it an
4468 * unwritten extent.
4470 ASSERT((flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)) !=
4471 (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO));
4473 if (unlikely(XFS_TEST_ERROR(
4474 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
4475 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
4476 mp, XFS_ERRTAG_BMAPIFORMAT))) {
4477 XFS_ERROR_REPORT("xfs_bmapi_write", XFS_ERRLEVEL_LOW, mp);
4478 return -EFSCORRUPTED;
4481 if (XFS_FORCED_SHUTDOWN(mp))
4484 ifp = XFS_IFORK_PTR(ip, whichfork);
4486 XFS_STATS_INC(mp, xs_blk_mapw);
4488 if (*firstblock == NULLFSBLOCK) {
4489 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE)
4490 bma.minleft = be16_to_cpu(ifp->if_broot->bb_level) + 1;
4497 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
4498 error = xfs_iread_extents(tp, ip, whichfork);
4507 if (!xfs_iext_lookup_extent(ip, ifp, bno, &bma.idx, &bma.got))
4509 if (!xfs_iext_get_extent(ifp, bma.idx - 1, &bma.prev))
4510 bma.prev.br_startoff = NULLFILEOFF;
4516 bma.firstblock = firstblock;
4518 while (bno < end && n < *nmap) {
4519 bool need_alloc = false, wasdelay = false;
4521 /* in hole or beyond EOF? */
4522 if (eof || bma.got.br_startoff > bno) {
4523 if (flags & XFS_BMAPI_DELALLOC) {
4525 * For the COW fork we can reasonably get a
4526 * request for converting an extent that races
4527 * with other threads already having converted
4528 * part of it, since converting COW to
4529 * regular blocks is not protected by the IOLOCK.
4532 ASSERT(flags & XFS_BMAPI_COWFORK);
4533 if (!(flags & XFS_BMAPI_COWFORK)) {
4538 if (eof || bno >= end)
4543 } else if (isnullstartblock(bma.got.br_startblock)) {
4548 * First, deal with the hole before the allocated space
4549 * that we found, if any.
4551 if (need_alloc || wasdelay) {
4553 bma.conv = !!(flags & XFS_BMAPI_CONVERT);
4554 bma.wasdel = wasdelay;
4559 * There's a 32/64 bit type mismatch between the
4560 * allocation length request (which can be 64 bits in
4561 * length) and the bma length request, which is
4562 * xfs_extlen_t and therefore 32 bits. Hence we have to
4563 * check for 32-bit overflows and handle them here.
4565 if (len > (xfs_filblks_t)MAXEXTLEN)
4566 bma.length = MAXEXTLEN;
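/*
 * For example (illustrative): a request for 2^33 blocks cannot be
 * expressed in the 32-bit bma.length, so it is clamped to MAXEXTLEN
 * here and the remainder is picked up on later iterations of the
 * mapping loop.
 */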
4571 ASSERT(bma.length > 0);
4572 error = xfs_bmapi_allocate(&bma);
4575 if (bma.blkno == NULLFSBLOCK)
4579 * If this is a CoW allocation, record the data in
4580 * the refcount btree for orphan recovery.
4582 if (whichfork == XFS_COW_FORK) {
4583 error = xfs_refcount_alloc_cow_extent(mp, dfops,
4584 bma.blkno, bma.length);
4590 /* Deal with the allocated space we found. */
4591 xfs_bmapi_trim_map(mval, &bma.got, &bno, len, obno,
4594 /* Execute unwritten extent conversion if necessary */
4595 error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags);
4596 if (error == -EAGAIN)
4601 /* update the extent map to return */
4602 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
4605 * If we're done, stop now. Stop when we've allocated
4606 * XFS_BMAP_MAX_NMAP extents no matter what. Otherwise
4607 * the transaction may get too big.
4609 if (bno >= end || n >= *nmap || bma.nallocs >= *nmap)
4612 /* Else go on to the next record. */
4614 if (!xfs_iext_get_extent(ifp, ++bma.idx, &bma.got))
4620 * Transform from btree to extents, give it cur.
4622 if (xfs_bmap_wants_extents(ip, whichfork)) {
4623 int tmp_logflags = 0;
4626 error = xfs_bmap_btree_to_extents(tp, ip, bma.cur,
4627 &tmp_logflags, whichfork);
4628 bma.logflags |= tmp_logflags;
4633 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE ||
4634 XFS_IFORK_NEXTENTS(ip, whichfork) >
4635 XFS_IFORK_MAXEXT(ip, whichfork));
4639 * Log everything. Do this after conversion, there's no point in
4640 * logging the extent records if we've converted to btree format.
4642 if ((bma.logflags & xfs_ilog_fext(whichfork)) &&
4643 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
4644 bma.logflags &= ~xfs_ilog_fext(whichfork);
4645 else if ((bma.logflags & xfs_ilog_fbroot(whichfork)) &&
4646 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
4647 bma.logflags &= ~xfs_ilog_fbroot(whichfork);
4649 * Log whatever the flags say, even if error. Otherwise we might miss
4650 * detecting a case where the data is changed, there's an error,
4651 * and it's not logged so we don't shut down when we should.
4654 xfs_trans_log_inode(tp, ip, bma.logflags);
4658 ASSERT(*firstblock == NULLFSBLOCK ||
4659 XFS_FSB_TO_AGNO(mp, *firstblock) <=
4661 bma.cur->bc_private.b.firstblock));
4662 *firstblock = bma.cur->bc_private.b.firstblock;
4664 xfs_btree_del_cursor(bma.cur,
4665 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
4668 xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
4675 struct xfs_trans *tp,
4676 struct xfs_inode *ip,
4679 xfs_fsblock_t startblock,
4680 struct xfs_defer_ops *dfops)
4682 struct xfs_mount *mp = ip->i_mount;
4683 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
4684 struct xfs_btree_cur *cur = NULL;
4685 xfs_fsblock_t firstblock = NULLFSBLOCK;
4686 struct xfs_bmbt_irec got;
4688 int logflags = 0, error;
4691 ASSERT(len <= (xfs_filblks_t)MAXEXTLEN);
4692 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
4694 if (unlikely(XFS_TEST_ERROR(
4695 (XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_EXTENTS &&
4696 XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_BTREE),
4697 mp, XFS_ERRTAG_BMAPIFORMAT))) {
4698 XFS_ERROR_REPORT("xfs_bmapi_remap", XFS_ERRLEVEL_LOW, mp);
4699 return -EFSCORRUPTED;
4702 if (XFS_FORCED_SHUTDOWN(mp))
4705 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
4706 error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
4711 if (xfs_iext_lookup_extent(ip, ifp, bno, &idx, &got)) {
4712 /* make sure we only reflink into a hole. */
4713 ASSERT(got.br_startoff > bno);
4714 ASSERT(got.br_startoff - bno >= len);
4717 ip->i_d.di_nblocks += len;
4718 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
4720 if (ifp->if_flags & XFS_IFBROOT) {
4721 cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
4722 cur->bc_private.b.firstblock = firstblock;
4723 cur->bc_private.b.dfops = dfops;
4724 cur->bc_private.b.flags = 0;
4727 got.br_startoff = bno;
4728 got.br_startblock = startblock;
4729 got.br_blockcount = len;
4730 got.br_state = XFS_EXT_NORM;
4732 error = xfs_bmap_add_extent_hole_real(tp, ip, XFS_DATA_FORK, &idx, &cur,
4733 &got, &firstblock, dfops, &logflags);
4737 if (xfs_bmap_wants_extents(ip, XFS_DATA_FORK)) {
4738 int tmp_logflags = 0;
4740 error = xfs_bmap_btree_to_extents(tp, ip, cur,
4741 &tmp_logflags, XFS_DATA_FORK);
4742 logflags |= tmp_logflags;
4746 if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS)
4747 logflags &= ~XFS_ILOG_DEXT;
4748 else if (ip->i_d.di_format != XFS_DINODE_FMT_BTREE)
4749 logflags &= ~XFS_ILOG_DBROOT;
4752 xfs_trans_log_inode(tp, ip, logflags);
4754 xfs_btree_del_cursor(cur,
4755 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
4761 * When a delalloc extent is split (e.g., due to a hole punch), the original
4762 * indlen reservation must be shared across the two new extents that are left behind.
4765 * Given the original reservation and the worst case indlen for the two new
4766 * extents (as calculated by xfs_bmap_worst_indlen()), split the original
4767 * reservation fairly across the two new extents. If necessary, steal available
4768 * blocks from a deleted extent to make up a reservation deficiency (e.g., if
4769 * ores == 1). The number of stolen blocks is returned. The availability and
4770 * subsequent accounting of stolen blocks is the responsibility of the caller.
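/*
 * Worked example (illustrative): ores = 6 with worst-case indlens of
 * 5 and 5 (nres = 10) and no blocks available to steal gives
 * resfactor = 60, so each extent is scaled down to 3 blocks and no
 * remainder is left to hand out.
 */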
4772 static xfs_filblks_t
4773 xfs_bmap_split_indlen(
4774 xfs_filblks_t ores, /* original res. */
4775 xfs_filblks_t *indlen1, /* ext1 worst indlen */
4776 xfs_filblks_t *indlen2, /* ext2 worst indlen */
4777 xfs_filblks_t avail) /* stealable blocks */
4779 xfs_filblks_t len1 = *indlen1;
4780 xfs_filblks_t len2 = *indlen2;
4781 xfs_filblks_t nres = len1 + len2; /* new total res. */
4782 xfs_filblks_t stolen = 0;
4783 xfs_filblks_t resfactor;
4786 * Steal as many blocks as we can to try and satisfy the worst case
4787 * indlen for both new extents.
4789 if (ores < nres && avail)
4790 stolen = XFS_FILBLKS_MIN(nres - ores, avail);
4793 /* nothing else to do if we've satisfied the new reservation */
4798 * We can't meet the total required reservation for the two extents.
4799 * Calculate the percent of the overall shortage between both extents
4800 * and apply this percentage to each of the requested indlen values.
4801 * This distributes the shortage fairly and reduces the chances that one
4802 * of the two extents is left with nothing when extents are repeatedly split.
4805 resfactor = (ores * 100);
4806 do_div(resfactor, nres);
4811 ASSERT(len1 + len2 <= ores);
4812 ASSERT(len1 < *indlen1 && len2 < *indlen2);
4815 * Hand out the remainder to each extent. If one of the two reservations
4816 * is zero, we want to make sure that one gets a block first. The loop
4817 * below starts with len1, so hand len2 a block right off the bat if it is zero.
4820 ores -= (len1 + len2);
4821 ASSERT((*indlen1 - len1) + (*indlen2 - len2) >= ores);
4822 if (ores && !len2 && *indlen2) {
4827 if (len1 < *indlen1) {
4833 if (len2 < *indlen2) {
4846 xfs_bmap_del_extent_delay(
4847 struct xfs_inode *ip,
4850 struct xfs_bmbt_irec *got,
4851 struct xfs_bmbt_irec *del)
4853 struct xfs_mount *mp = ip->i_mount;
4854 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
4855 struct xfs_bmbt_irec new;
4856 int64_t da_old, da_new, da_diff = 0;
4857 xfs_fileoff_t del_endoff, got_endoff;
4858 xfs_filblks_t got_indlen, new_indlen, stolen;
4859 int error = 0, state = 0;
4862 XFS_STATS_INC(mp, xs_del_exlist);
4864 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
4865 del_endoff = del->br_startoff + del->br_blockcount;
4866 got_endoff = got->br_startoff + got->br_blockcount;
4867 da_old = startblockval(got->br_startblock);
4871 ASSERT(*idx <= xfs_iext_count(ifp));
4872 ASSERT(del->br_blockcount > 0);
4873 ASSERT(got->br_startoff <= del->br_startoff);
4874 ASSERT(got_endoff >= del_endoff);
4877 uint64_t rtexts = XFS_FSB_TO_B(mp, del->br_blockcount);
4879 do_div(rtexts, mp->m_sb.sb_rextsize);
4880 xfs_mod_frextents(mp, rtexts);
4884 * Update the inode delalloc counter now and wait to update the
4885 * sb counters as we might have to borrow some blocks for the
4886 * indirect block accounting.
4888 error = xfs_trans_reserve_quota_nblks(NULL, ip,
4889 -((long)del->br_blockcount), 0,
4890 isrt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
4893 ip->i_delayed_blks -= del->br_blockcount;
4895 if (whichfork == XFS_COW_FORK)
4896 state |= BMAP_COWFORK;
4898 if (got->br_startoff == del->br_startoff)
4899 state |= BMAP_LEFT_CONTIG;
4900 if (got_endoff == del_endoff)
4901 state |= BMAP_RIGHT_CONTIG;
4903 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
4904 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
4906 * Matches the whole extent. Delete the entry.
4908 xfs_iext_remove(ip, *idx, 1, state);
4911 case BMAP_LEFT_CONTIG:
4913 * Deleting the first part of the extent.
4915 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
4916 got->br_startoff = del_endoff;
4917 got->br_blockcount -= del->br_blockcount;
4918 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
4919 got->br_blockcount), da_old);
4920 got->br_startblock = nullstartblock((int)da_new);
4921 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
4922 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
4924 case BMAP_RIGHT_CONTIG:
4926 * Deleting the last part of the extent.
4928 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
4929 got->br_blockcount = got->br_blockcount - del->br_blockcount;
4930 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
4931 got->br_blockcount), da_old);
4932 got->br_startblock = nullstartblock((int)da_new);
4933 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
4934 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
4938 * Deleting the middle of the extent.
4940 * Distribute the original indlen reservation across the two new
4941 * extents. Steal blocks from the deleted extent if necessary.
4942 * Stealing blocks simply fudges the fdblocks accounting below.
4943 * Warn if either of the new indlen reservations is zero as this
4944 * can lead to delalloc problems.
4946 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
4948 got->br_blockcount = del->br_startoff - got->br_startoff;
4949 got_indlen = xfs_bmap_worst_indlen(ip, got->br_blockcount);
4951 new.br_blockcount = got_endoff - del_endoff;
4952 new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount);
4954 WARN_ON_ONCE(!got_indlen || !new_indlen);
4955 stolen = xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen,
4956 del->br_blockcount);
4958 got->br_startblock = nullstartblock((int)got_indlen);
4959 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
4960 trace_xfs_bmap_post_update(ip, *idx, 0, _THIS_IP_);
4962 new.br_startoff = del_endoff;
4963 new.br_state = got->br_state;
4964 new.br_startblock = nullstartblock((int)new_indlen);
4967 xfs_iext_insert(ip, *idx, 1, &new, state);
4969 da_new = got_indlen + new_indlen - stolen;
4970 del->br_blockcount -= stolen;
4974 ASSERT(da_old >= da_new);
4975 da_diff = da_old - da_new;
4977 da_diff += del->br_blockcount;
4979 xfs_mod_fdblocks(mp, da_diff, false);
4984 xfs_bmap_del_extent_cow(
4985 struct xfs_inode *ip,
4987 struct xfs_bmbt_irec *got,
4988 struct xfs_bmbt_irec *del)
4990 struct xfs_mount *mp = ip->i_mount;
4991 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
4992 struct xfs_bmbt_irec new;
4993 xfs_fileoff_t del_endoff, got_endoff;
4994 int state = BMAP_COWFORK;
4996 XFS_STATS_INC(mp, xs_del_exlist);
4998 del_endoff = del->br_startoff + del->br_blockcount;
4999 got_endoff = got->br_startoff + got->br_blockcount;
5002 ASSERT(*idx <= xfs_iext_count(ifp));
5003 ASSERT(del->br_blockcount > 0);
5004 ASSERT(got->br_startoff <= del->br_startoff);
5005 ASSERT(got_endoff >= del_endoff);
5006 ASSERT(!isnullstartblock(got->br_startblock));
5008 if (got->br_startoff == del->br_startoff)
5009 state |= BMAP_LEFT_CONTIG;
5010 if (got_endoff == del_endoff)
5011 state |= BMAP_RIGHT_CONTIG;
5013 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
5014 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
5016 * Matches the whole extent. Delete the entry.
5018 xfs_iext_remove(ip, *idx, 1, state);
5021 case BMAP_LEFT_CONTIG:
5023 * Deleting the first part of the extent.
5025 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
5026 got->br_startoff = del_endoff;
5027 got->br_blockcount -= del->br_blockcount;
5028 got->br_startblock = del->br_startblock + del->br_blockcount;
5029 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
5030 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5032 case BMAP_RIGHT_CONTIG:
5034 * Deleting the last part of the extent.
5036 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
5037 got->br_blockcount -= del->br_blockcount;
5038 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
5039 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5043 * Deleting the middle of the extent.
5045 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
5046 got->br_blockcount = del->br_startoff - got->br_startoff;
5047 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
5048 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5050 new.br_startoff = del_endoff;
5051 new.br_blockcount = got_endoff - del_endoff;
5052 new.br_state = got->br_state;
5053 new.br_startblock = del->br_startblock + del->br_blockcount;
5056 xfs_iext_insert(ip, *idx, 1, &new, state);
5062 * Called by xfs_bmapi to update file extent records and the btree
5063 * after removing space (or undoing a delayed allocation).
5065 STATIC int /* error */
5066 xfs_bmap_del_extent(
5067 xfs_inode_t *ip, /* incore inode pointer */
5068 xfs_trans_t *tp, /* current transaction pointer */
5069 xfs_extnum_t *idx, /* extent number to update/delete */
5070 struct xfs_defer_ops *dfops, /* list of extents to be freed */
5071 xfs_btree_cur_t *cur, /* if null, not a btree */
5072 xfs_bmbt_irec_t *del, /* data to remove from extents */
5073 int *logflagsp, /* inode logging flags */
5074 int whichfork, /* data or attr fork */
5075 int bflags) /* bmapi flags */
5077 xfs_filblks_t da_new; /* new delay-alloc indirect blocks */
5078 xfs_filblks_t da_old; /* old delay-alloc indirect blocks */
5079 xfs_fsblock_t del_endblock=0; /* first block past del */
5080 xfs_fileoff_t del_endoff; /* first offset past del */
5081 int delay; /* current block is delayed allocated */
5082 int do_fx; /* free extent at end of routine */
5083 xfs_bmbt_rec_host_t *ep; /* current extent entry pointer */
5084 int error; /* error return value */
5085 int flags; /* inode logging flags */
5086 xfs_bmbt_irec_t got; /* current extent entry */
5087 xfs_fileoff_t got_endoff; /* first offset past got */
5088 int i; /* temp state */
5089 xfs_ifork_t *ifp; /* inode fork pointer */
5090 xfs_mount_t *mp; /* mount structure */
5091 xfs_filblks_t nblks; /* quota/sb block count */
5092 xfs_bmbt_irec_t new; /* new record to be inserted */
5094 uint qfield; /* quota field to update */
5095 xfs_filblks_t temp; /* for indirect length calculations */
5096 xfs_filblks_t temp2; /* for indirect length calculations */
5100 XFS_STATS_INC(mp, xs_del_exlist);
5102 if (whichfork == XFS_ATTR_FORK)
5103 state |= BMAP_ATTRFORK;
5104 else if (whichfork == XFS_COW_FORK)
5105 state |= BMAP_COWFORK;
5107 ifp = XFS_IFORK_PTR(ip, whichfork);
5108 ASSERT((*idx >= 0) && (*idx < xfs_iext_count(ifp)));
5109 ASSERT(del->br_blockcount > 0);
5110 ep = xfs_iext_get_ext(ifp, *idx);
5111 xfs_bmbt_get_all(ep, &got);
5112 ASSERT(got.br_startoff <= del->br_startoff);
5113 del_endoff = del->br_startoff + del->br_blockcount;
5114 got_endoff = got.br_startoff + got.br_blockcount;
5115 ASSERT(got_endoff >= del_endoff);
5116 delay = isnullstartblock(got.br_startblock);
5117 ASSERT(isnullstartblock(del->br_startblock) == delay);
5122 * If deleting a real allocation, must free up the disk space.
5125 flags = XFS_ILOG_CORE;
5127 * Realtime allocation. Free it and record di_nblocks update.
5129 if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) {
5133 ASSERT(do_mod(del->br_blockcount,
5134 mp->m_sb.sb_rextsize) == 0);
5135 ASSERT(do_mod(del->br_startblock,
5136 mp->m_sb.sb_rextsize) == 0);
5137 bno = del->br_startblock;
5138 len = del->br_blockcount;
5139 do_div(bno, mp->m_sb.sb_rextsize);
5140 do_div(len, mp->m_sb.sb_rextsize);
5141 error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len);
5145 nblks = len * mp->m_sb.sb_rextsize;
5146 qfield = XFS_TRANS_DQ_RTBCOUNT;
5149 * Ordinary allocation.
5153 nblks = del->br_blockcount;
5154 qfield = XFS_TRANS_DQ_BCOUNT;
5157 * Set up del_endblock and cur for later.
5159 del_endblock = del->br_startblock + del->br_blockcount;
5161 if ((error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
5162 got.br_startblock, got.br_blockcount,
5165 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
5167 da_old = da_new = 0;
5169 da_old = startblockval(got.br_startblock);
5176 * Set flag value to use in switch statement.
5177 * Left-contig is 2, right-contig is 1.
5179 switch (((got.br_startoff == del->br_startoff) << 1) |
5180 (got_endoff == del_endoff)) {
5183 * Matches the whole extent. Delete the entry.
5185 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
5186 xfs_iext_remove(ip, *idx, 1,
5187 whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0);
5192 XFS_IFORK_NEXT_SET(ip, whichfork,
5193 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
5194 flags |= XFS_ILOG_CORE;
5196 flags |= xfs_ilog_fext(whichfork);
5199 if ((error = xfs_btree_delete(cur, &i)))
5201 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
5206 * Deleting the first part of the extent.
5208 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
5209 xfs_bmbt_set_startoff(ep, del_endoff);
5210 temp = got.br_blockcount - del->br_blockcount;
5211 xfs_bmbt_set_blockcount(ep, temp);
5213 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
5215 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
5216 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5220 xfs_bmbt_set_startblock(ep, del_endblock);
5221 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5223 flags |= xfs_ilog_fext(whichfork);
5226 if ((error = xfs_bmbt_update(cur, del_endoff, del_endblock,
5227 got.br_blockcount - del->br_blockcount,
5234 * Deleting the last part of the extent.
5236 temp = got.br_blockcount - del->br_blockcount;
5237 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
5238 xfs_bmbt_set_blockcount(ep, temp);
5240 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
5242 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
5243 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5247 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5249 flags |= xfs_ilog_fext(whichfork);
5252 if ((error = xfs_bmbt_update(cur, got.br_startoff,
5254 got.br_blockcount - del->br_blockcount,
5261 * Deleting the middle of the extent.
5263 temp = del->br_startoff - got.br_startoff;
5264 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
5265 xfs_bmbt_set_blockcount(ep, temp);
5266 new.br_startoff = del_endoff;
5267 temp2 = got_endoff - del_endoff;
5268 new.br_blockcount = temp2;
5269 new.br_state = got.br_state;
5271 new.br_startblock = del_endblock;
5272 flags |= XFS_ILOG_CORE;
5274 if ((error = xfs_bmbt_update(cur,
5276 got.br_startblock, temp,
5279 if ((error = xfs_btree_increment(cur, 0, &i)))
5281 cur->bc_rec.b = new;
5282 error = xfs_btree_insert(cur, &i);
5283 if (error && error != -ENOSPC)
5286 * If we get no-space back from the btree insert,
5287 * it tried a split, and we have a zero
5288 * block reservation.
5289 * Fix up our state and return the error.
5291 if (error == -ENOSPC) {
5293 * Reset the cursor, don't trust
5294 * it after any insert operation.
5296 if ((error = xfs_bmbt_lookup_eq(cur,
5301 XFS_WANT_CORRUPTED_GOTO(mp,
5304 * Update the btree record back
5305 * to the original value.
5307 if ((error = xfs_bmbt_update(cur,
5314 * Reset the extent record back
5315 * to the original value.
5317 xfs_bmbt_set_blockcount(ep,
5323 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
5325 flags |= xfs_ilog_fext(whichfork);
5326 XFS_IFORK_NEXT_SET(ip, whichfork,
5327 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
5329 xfs_filblks_t stolen;
5330 ASSERT(whichfork == XFS_DATA_FORK);
5333 * Distribute the original indlen reservation across the
5334 * two new extents. Steal blocks from the deleted extent
5335 * if necessary. Stealing blocks simply fudges the
5336 * fdblocks accounting in xfs_bunmapi().
5338 temp = xfs_bmap_worst_indlen(ip, got.br_blockcount);
5339 temp2 = xfs_bmap_worst_indlen(ip, new.br_blockcount);
5340 stolen = xfs_bmap_split_indlen(da_old, &temp, &temp2,
5341 del->br_blockcount);
5342 da_new = temp + temp2 - stolen;
5343 del->br_blockcount -= stolen;
5346 * Set the reservation for each extent. Warn if either
5347 * is zero as this can lead to delalloc problems.
5349 WARN_ON_ONCE(!temp || !temp2);
5350 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
5351 new.br_startblock = nullstartblock((int)temp2);
5353 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5354 xfs_iext_insert(ip, *idx + 1, 1, &new, state);
5359 /* remove reverse mapping */
5361 error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, del);
5367 * If we need to, add to list of extents to delete.
5369 if (do_fx && !(bflags & XFS_BMAPI_REMAP)) {
5370 if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) {
5371 error = xfs_refcount_decrease_extent(mp, dfops, del);
5375 xfs_bmap_add_free(mp, dfops, del->br_startblock,
5376 del->br_blockcount, NULL);
5380 * Adjust inode # blocks in the file.
5383 ip->i_d.di_nblocks -= nblks;
5385 * Adjust quota data.
5387 if (qfield && !(bflags & XFS_BMAPI_REMAP))
5388 xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);
5391 * Account for change in delayed indirect blocks.
5392 * Nothing to do for disk quota accounting here.
5394 ASSERT(da_old >= da_new);
5395 if (da_old > da_new)
5396 xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new), false);
5403 * Unmap (remove) blocks from a file.
5404 * If nexts is nonzero then the number of extents to remove is limited to
5405 * that value. If not all extents in the block range can be removed then *rlen reports the length that remains.
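/*
 * Caller-side sketch (illustrative; start_fsb, count_fsb and unmap_len
 * are hypothetical locals): because *rlen reports what could not be
 * removed, callers typically loop, rolling the transaction between calls:
 *
 *	xfs_filblks_t	unmap_len = count_fsb;
 *
 *	while (unmap_len > 0) {
 *		error = __xfs_bunmapi(tp, ip, start_fsb, &unmap_len,
 *				0, 1, &firstblock, &dfops);
 *		if (error)
 *			break;
 *		... finish deferred ops and roll the transaction ...
 *	}
 */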
5410 xfs_trans_t *tp, /* transaction pointer */
5411 struct xfs_inode *ip, /* incore inode */
5412 xfs_fileoff_t bno, /* starting offset to unmap */
5413 xfs_filblks_t *rlen, /* i/o: amount remaining */
5414 int flags, /* misc flags */
5415 xfs_extnum_t nexts, /* number of extents max */
5416 xfs_fsblock_t *firstblock, /* first allocated block
5417 controls a.g. for allocs */
5418 struct xfs_defer_ops *dfops) /* i/o: deferred updates */
5420 xfs_btree_cur_t *cur; /* bmap btree cursor */
5421 xfs_bmbt_irec_t del; /* extent being deleted */
5422 int error; /* error return value */
5423 xfs_extnum_t extno; /* extent number in list */
5424 xfs_bmbt_irec_t got; /* current extent record */
5425 xfs_ifork_t *ifp; /* inode fork pointer */
5426 int isrt; /* freeing in rt area */
5427 xfs_extnum_t lastx; /* last extent index used */
5428 int logflags; /* transaction logging flags */
5429 xfs_extlen_t mod; /* rt extent offset */
5430 xfs_mount_t *mp; /* mount structure */
5431 xfs_fileoff_t start; /* first file offset deleted */
5432 int tmp_logflags; /* partial logging flags */
5433 int wasdel; /* was a delayed alloc extent */
5434 int whichfork; /* data or attribute fork */
5436 xfs_filblks_t len = *rlen; /* length to unmap in file */
5437 xfs_fileoff_t max_len;
5439 trace_xfs_bunmap(ip, bno, len, flags, _RET_IP_);
5441 whichfork = xfs_bmapi_whichfork(flags);
5442 ASSERT(whichfork != XFS_COW_FORK);
5443 ifp = XFS_IFORK_PTR(ip, whichfork);
5445 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
5446 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
5447 XFS_ERROR_REPORT("xfs_bunmapi", XFS_ERRLEVEL_LOW,
5449 return -EFSCORRUPTED;
5452 if (XFS_FORCED_SHUTDOWN(mp))
5455 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
5460 * Guesstimate how many blocks we can unmap without running the risk of
5461 * blowing out the transaction with a mix of EFIs and reflink updates.
5464 if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK)
5465 max_len = min(len, xfs_refcount_max_unmap(tp->t_log_res));
5469 if (!(ifp->if_flags & XFS_IFEXTENTS) &&
5470 (error = xfs_iread_extents(tp, ip, whichfork)))
5472 if (xfs_iext_count(ifp) == 0) {
5476 XFS_STATS_INC(mp, xs_blk_unmap);
5477 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
5479 bno = start + len - 1;
5482 * Check to see if the given block number is past the end of the
5483 * file, back up to the last block if so...
5485 if (!xfs_iext_lookup_extent(ip, ifp, bno, &lastx, &got)) {
5487 xfs_iext_get_extent(ifp, --lastx, &got);
5488 bno = got.br_startoff + got.br_blockcount - 1;
5492 if (ifp->if_flags & XFS_IFBROOT) {
5493 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
5494 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5495 cur->bc_private.b.firstblock = *firstblock;
5496 cur->bc_private.b.dfops = dfops;
5497 cur->bc_private.b.flags = 0;
5503 * Synchronize by locking the bitmap inode.
5505 xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
5506 xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL);
5507 xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
5508 xfs_trans_ijoin(tp, mp->m_rsumip, XFS_ILOCK_EXCL);
5512 while (bno != (xfs_fileoff_t)-1 && bno >= start && lastx >= 0 &&
5513 (nexts == 0 || extno < nexts) && max_len > 0) {
5515 * Is the found extent after a hole in which bno lives?
5516 * Just back up to the previous extent, if so.
5518 if (got.br_startoff > bno) {
5521 xfs_iext_get_extent(ifp, lastx, &got);
5524 * Is the last block of this extent before the range
5525 * we're supposed to delete? If so, we're done.
5527 bno = XFS_FILEOFF_MIN(bno,
5528 got.br_startoff + got.br_blockcount - 1);
5532 * Then deal with the (possibly delayed) allocated space we found.
5536 wasdel = isnullstartblock(del.br_startblock);
5537 if (got.br_startoff < start) {
5538 del.br_startoff = start;
5539 del.br_blockcount -= start - got.br_startoff;
5541 del.br_startblock += start - got.br_startoff;
5543 if (del.br_startoff + del.br_blockcount > bno + 1)
5544 del.br_blockcount = bno + 1 - del.br_startoff;
5546 /* How much can we safely unmap? */
5547 if (max_len < del.br_blockcount) {
5548 del.br_startoff += del.br_blockcount - max_len;
5550 del.br_startblock += del.br_blockcount - max_len;
5551 del.br_blockcount = max_len;
5554 sum = del.br_startblock + del.br_blockcount;
5556 (mod = do_mod(sum, mp->m_sb.sb_rextsize))) {
5558 * Realtime extent not lined up at the end.
5559 * The extent could have been split into written
5560 * and unwritten pieces, or we could just be
5561 * unmapping part of it. But we can't really
5562 * get rid of part of a realtime extent.
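/*
 * Worked example (illustrative): with sb_rextsize = 4, an unmap whose
 * del ends at startblock + blockcount = 18 leaves mod = 2, so the tail
 * is not rt-extent aligned and is either skipped or converted to
 * unwritten below rather than freed outright.
 */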
5564 if (del.br_state == XFS_EXT_UNWRITTEN ||
5565 !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
5567 * This piece is unwritten, or we're not
5568 * using unwritten extents. Skip over it.
5571 bno -= mod > del.br_blockcount ?
5572 del.br_blockcount : mod;
5573 if (bno < got.br_startoff) {
5575 xfs_bmbt_get_all(xfs_iext_get_ext(
5581 * It's written, turn it unwritten.
5582 * This is better than zeroing it.
5584 ASSERT(del.br_state == XFS_EXT_NORM);
5585 ASSERT(tp->t_blk_res > 0);
5587 * If this spans a realtime extent boundary,
5588 * chop it back to the start of the one we end at.
5590 if (del.br_blockcount > mod) {
5591 del.br_startoff += del.br_blockcount - mod;
5592 del.br_startblock += del.br_blockcount - mod;
5593 del.br_blockcount = mod;
5595 del.br_state = XFS_EXT_UNWRITTEN;
5596 error = xfs_bmap_add_extent_unwritten_real(tp, ip,
5597 whichfork, &lastx, &cur, &del,
5598 firstblock, dfops, &logflags);
5603 if (isrt && (mod = do_mod(del.br_startblock, mp->m_sb.sb_rextsize))) {
5605 * Realtime extent is lined up at the end but not
5606 * at the front. We'll get rid of full extents if we can.
5609 mod = mp->m_sb.sb_rextsize - mod;
5610 if (del.br_blockcount > mod) {
5611 del.br_blockcount -= mod;
5612 del.br_startoff += mod;
5613 del.br_startblock += mod;
5614 } else if ((del.br_startoff == start &&
5615 (del.br_state == XFS_EXT_UNWRITTEN ||
5616 tp->t_blk_res == 0)) ||
5617 !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
5619 * Can't make it unwritten. There isn't
5620 * a full extent here so just skip it.
5622 ASSERT(bno >= del.br_blockcount);
5623 bno -= del.br_blockcount;
5624 if (got.br_startoff > bno && --lastx >= 0)
5625 xfs_iext_get_extent(ifp, lastx, &got);
5627 } else if (del.br_state == XFS_EXT_UNWRITTEN) {
5628 struct xfs_bmbt_irec prev;
5631 * This one is already unwritten.
5632 * It must have a written left neighbor.
5633 * Unwrite the killed part of that one and try again.
5637 xfs_iext_get_extent(ifp, lastx - 1, &prev);
5638 ASSERT(prev.br_state == XFS_EXT_NORM);
5639 ASSERT(!isnullstartblock(prev.br_startblock));
5640 ASSERT(del.br_startblock ==
5641 prev.br_startblock + prev.br_blockcount);
5642 if (prev.br_startoff < start) {
5643 mod = start - prev.br_startoff;
5644 prev.br_blockcount -= mod;
5645 prev.br_startblock += mod;
5646 prev.br_startoff = start;
5648 prev.br_state = XFS_EXT_UNWRITTEN;
5650 error = xfs_bmap_add_extent_unwritten_real(tp,
5651 ip, whichfork, &lastx, &cur,
5652 &prev, firstblock, dfops,
5658 ASSERT(del.br_state == XFS_EXT_NORM);
5659 del.br_state = XFS_EXT_UNWRITTEN;
5660 error = xfs_bmap_add_extent_unwritten_real(tp,
5661 ip, whichfork, &lastx, &cur,
5662 &del, firstblock, dfops,
5671 * If it's the case where the directory code is running
5672 * with no block reservation, and the deleted block is in
5673 * the middle of its extent, and the resulting insert
5674 * of an extent would cause transformation to btree format,
5675 * then reject it. The calling code will then swap
5676 * blocks around instead.
5677 * We have to do this now, rather than waiting for the
5678 * conversion to btree format, since the transaction will be dirty.
5681 if (!wasdel && tp->t_blk_res == 0 &&
5682 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
5683 XFS_IFORK_NEXTENTS(ip, whichfork) >= /* Note the >= */
5684 XFS_IFORK_MAXEXT(ip, whichfork) &&
5685 del.br_startoff > got.br_startoff &&
5686 del.br_startoff + del.br_blockcount <
5687 got.br_startoff + got.br_blockcount) {
5693 * Unreserve quota and update realtime free space, if
5694 * appropriate. If delayed allocation, update the inode delalloc
5695 * counter now and wait to update the sb counters as
5696 * xfs_bmap_del_extent() might need to borrow some blocks.
5699 ASSERT(startblockval(del.br_startblock) > 0);
5701 xfs_filblks_t rtexts;
5703 rtexts = XFS_FSB_TO_B(mp, del.br_blockcount);
5704 do_div(rtexts, mp->m_sb.sb_rextsize);
5705 xfs_mod_frextents(mp, (int64_t)rtexts);
5706 (void)xfs_trans_reserve_quota_nblks(NULL,
5707 ip, -((long)del.br_blockcount), 0,
5708 XFS_QMOPT_RES_RTBLKS);
5710 (void)xfs_trans_reserve_quota_nblks(NULL,
5711 ip, -((long)del.br_blockcount), 0,
5712 XFS_QMOPT_RES_REGBLKS);
5714 ip->i_delayed_blks -= del.br_blockcount;
5716 cur->bc_private.b.flags |=
5717 XFS_BTCUR_BPRV_WASDEL;
5719 cur->bc_private.b.flags &= ~XFS_BTCUR_BPRV_WASDEL;
5721 error = xfs_bmap_del_extent(ip, tp, &lastx, dfops, cur, &del,
5722 &tmp_logflags, whichfork, flags);
5723 logflags |= tmp_logflags;
5727 if (!isrt && wasdel)
5728 xfs_mod_fdblocks(mp, (int64_t)del.br_blockcount, false);
5730 max_len -= del.br_blockcount;
5731 bno = del.br_startoff - 1;
5734 * If not done go on to the next (previous) record.
5736 if (bno != (xfs_fileoff_t)-1 && bno >= start) {
5738 xfs_iext_get_extent(ifp, lastx, &got);
5739 if (got.br_startoff > bno && --lastx >= 0)
5740 xfs_iext_get_extent(ifp, lastx, &got);
5745 if (bno == (xfs_fileoff_t)-1 || bno < start || lastx < 0)
5748 *rlen = bno - start + 1;
5751 * Convert to a btree if necessary.
5753 if (xfs_bmap_needs_btree(ip, whichfork)) {
5754 ASSERT(cur == NULL);
5755 error = xfs_bmap_extents_to_btree(tp, ip, firstblock, dfops,
5756 &cur, 0, &tmp_logflags, whichfork);
5757 logflags |= tmp_logflags;
5762 * transform from btree to extents, give it cur
5764 else if (xfs_bmap_wants_extents(ip, whichfork)) {
5765 ASSERT(cur != NULL);
5766 error = xfs_bmap_btree_to_extents(tp, ip, cur, &tmp_logflags,
5768 logflags |= tmp_logflags;
5773 * transform from extents to local?
5778 * Log everything. Do this after conversion, there's no point in
5779 * logging the extent records if we've converted to btree format.
5781 if ((logflags & xfs_ilog_fext(whichfork)) &&
5782 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
5783 logflags &= ~xfs_ilog_fext(whichfork);
5784 else if ((logflags & xfs_ilog_fbroot(whichfork)) &&
5785 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
5786 logflags &= ~xfs_ilog_fbroot(whichfork);
5788 * Log the inode even in the error case; if the transaction
5789 * is dirty we'll need to shut down the filesystem.
5792 xfs_trans_log_inode(tp, ip, logflags);
5795 *firstblock = cur->bc_private.b.firstblock;
5796 cur->bc_private.b.allocated = 0;
5798 xfs_btree_del_cursor(cur,
5799 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
5804 /* Unmap a range of a file. */
5808 struct xfs_inode *ip,
5813 xfs_fsblock_t *firstblock,
5814 struct xfs_defer_ops *dfops,
5819 error = __xfs_bunmapi(tp, ip, bno, &len, flags, nexts, firstblock,
5826 * Determine whether an extent shift can be accomplished by a merge with the
5827 * extent that precedes the target hole of the shift.
5831 struct xfs_bmbt_irec *left, /* preceding extent */
5832 struct xfs_bmbt_irec *got, /* current extent to shift */
5833 xfs_fileoff_t shift) /* shift fsb */
5835 xfs_fileoff_t startoff;
5837 startoff = got->br_startoff - shift;
5840 * The extent, once shifted, must be adjacent in-file and on-disk with
5841 * the preceding extent.
5843 if ((left->br_startoff + left->br_blockcount != startoff) ||
5844 (left->br_startblock + left->br_blockcount != got->br_startblock) ||
5845 (left->br_state != got->br_state) ||
5846 (left->br_blockcount + got->br_blockcount > MAXEXTLEN))
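/*
 * Worked example (illustrative): left mapping file blocks [10, 20) at
 * disk blocks 100..109 and got mapping [25, 35) at 110..119 can merge
 * under shift = 5: the shifted startoff (20) lines up with the end of
 * left both in the file and on disk, the states match, and the combined
 * length stays within MAXEXTLEN.
 */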
5853 * A bmap extent shift adjusts the file offset of an extent to fill a preceding
5854 * hole in the file. If an extent shift would result in the extent being fully
5855 * adjacent to the extent that currently precedes the hole, we can merge with
5856 * the preceding extent rather than do the shift.
5858 * This function assumes the caller has verified a shift-by-merge is possible
5859 * with the provided extents via xfs_bmse_can_merge().
5863 struct xfs_inode *ip,
5865 xfs_fileoff_t shift, /* shift fsb */
5866 int current_ext, /* idx of gotp */
5867 struct xfs_bmbt_rec_host *gotp, /* extent to shift */
5868 struct xfs_bmbt_rec_host *leftp, /* preceding extent */
5869 struct xfs_btree_cur *cur,
5870 int *logflags) /* output */
5872 struct xfs_bmbt_irec got;
5873 struct xfs_bmbt_irec left;
5874 xfs_filblks_t blockcount;
5876 struct xfs_mount *mp = ip->i_mount;
5878 xfs_bmbt_get_all(gotp, &got);
5879 xfs_bmbt_get_all(leftp, &left);
5880 blockcount = left.br_blockcount + got.br_blockcount;
5882 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
5883 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
5884 ASSERT(xfs_bmse_can_merge(&left, &got, shift));
5887 * Merge the in-core extents. Note that the host record pointers and
5888 * current_ext index are invalid once the extent has been removed via
5889 * xfs_iext_remove().
5891 xfs_bmbt_set_blockcount(leftp, blockcount);
5892 xfs_iext_remove(ip, current_ext, 1, 0);
5895 * Update the on-disk extent count, the btree if necessary and log the inode.
5898 XFS_IFORK_NEXT_SET(ip, whichfork,
5899 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
5900 *logflags |= XFS_ILOG_CORE;
5902 *logflags |= XFS_ILOG_DEXT;
5906 /* lookup and remove the extent to merge */
5907 error = xfs_bmbt_lookup_eq(cur, got.br_startoff, got.br_startblock,
5908 got.br_blockcount, &i);
5911 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
5913 error = xfs_btree_delete(cur, &i);
5916 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
5918 /* lookup and update size of the previous extent */
5919 error = xfs_bmbt_lookup_eq(cur, left.br_startoff, left.br_startblock,
5920 left.br_blockcount, &i);
5923 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
5925 left.br_blockcount = blockcount;
5927 return xfs_bmbt_update(cur, left.br_startoff, left.br_startblock,
5928 left.br_blockcount, left.br_state);
5932 * Shift a single extent.
5936 struct xfs_inode *ip,
5938 xfs_fileoff_t offset_shift_fsb,
5940 struct xfs_bmbt_rec_host *gotp,
5941 struct xfs_btree_cur *cur,
5943 enum shift_direction direction,
5944 struct xfs_defer_ops *dfops)
5946 struct xfs_ifork *ifp;
5947 struct xfs_mount *mp;
5948 xfs_fileoff_t startoff;
5949 struct xfs_bmbt_rec_host *adj_irecp;
5950 struct xfs_bmbt_irec got;
5951 struct xfs_bmbt_irec adj_irec;
5957 ifp = XFS_IFORK_PTR(ip, whichfork);
5958 total_extents = xfs_iext_count(ifp);
5960 xfs_bmbt_get_all(gotp, &got);
5962 /* delalloc extents should be prevented by caller */
5963 XFS_WANT_CORRUPTED_RETURN(mp, !isnullstartblock(got.br_startblock));
5965 if (direction == SHIFT_LEFT) {
5966 startoff = got.br_startoff - offset_shift_fsb;
5969 * Check for merge if we've got an extent to the left,
5970 * otherwise make sure there's enough room at the start
5971 * of the file for the shift.
5973 if (!*current_ext) {
5974 if (got.br_startoff < offset_shift_fsb)
5976 goto update_current_ext;
5979 * grab the left extent and check for a large enough hole.
5982 adj_irecp = xfs_iext_get_ext(ifp, *current_ext - 1);
5983 xfs_bmbt_get_all(adj_irecp, &adj_irec);
5986 adj_irec.br_startoff + adj_irec.br_blockcount)
5989 /* check whether to merge the extent or shift it down */
5990 if (xfs_bmse_can_merge(&adj_irec, &got,
5991 offset_shift_fsb)) {
5992 error = xfs_bmse_merge(ip, whichfork, offset_shift_fsb,
5993 *current_ext, gotp, adj_irecp,
6001 startoff = got.br_startoff + offset_shift_fsb;
6002 /* nothing to move if this is the last extent */
6003 if (*current_ext >= (total_extents - 1))
6004 goto update_current_ext;
6006 * If this is not the last extent in the file, make sure there
6007 * is enough room between the current extent and the next extent
6008 * to accommodate the shift.
6010 adj_irecp = xfs_iext_get_ext(ifp, *current_ext + 1);
6011 xfs_bmbt_get_all(adj_irecp, &adj_irec);
6012 if (startoff + got.br_blockcount > adj_irec.br_startoff)
6015 * Unlike a left shift (which involves a hole punch),
6016 * a right shift does not modify extent neighbors
6017 * in any way. We should never find mergeable extents
6018 * in this scenario. Check anyway and warn if we
6019 * encounter two extents that could be one.
6021 if (xfs_bmse_can_merge(&got, &adj_irec, offset_shift_fsb))
6025 * Increment the extent index for the next iteration, update the start
6026 * offset of the in-core extent and update the btree if applicable.
6029 if (direction == SHIFT_LEFT)
6033 xfs_bmbt_set_startoff(gotp, startoff);
6034 *logflags |= XFS_ILOG_CORE;
6037 *logflags |= XFS_ILOG_DEXT;
6041 error = xfs_bmbt_lookup_eq(cur, got.br_startoff, got.br_startblock,
6042 got.br_blockcount, &i);
6045 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
6047 got.br_startoff = startoff;
6048 error = xfs_bmbt_update(cur, got.br_startoff, got.br_startblock,
6049 got.br_blockcount, got.br_state);
6054 /* update reverse mapping */
6055 error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, &adj_irec);
6058 adj_irec.br_startoff = startoff;
6059 return xfs_rmap_map_extent(mp, dfops, ip, whichfork, &adj_irec);
6063 * Shift extent records to the left/right to cover/create a hole.
6065 * The maximum number of extents to be shifted in a single operation is
6066 * @num_exts. @stop_fsb specifies the file offset at which to stop shift and the
6067 * file offset where we've left off is returned in @next_fsb. @offset_shift_fsb
6068 * is the length by which each extent is shifted. If there is no hole to shift
6069 * the extents into, this is considered an invalid operation and we abort immediately.
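/*
 * For illustration (made-up numbers): a left shift with
 * offset_shift_fsb = 8 moves an extent at startoff 24 to startoff 16,
 * merging it into the preceding extent when xfs_bmse_can_merge()
 * allows; a right shift instead walks from the last extent back toward
 * stop_fsb, moving each extent to a higher file offset.
 */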
6073 xfs_bmap_shift_extents(
6074 struct xfs_trans *tp,
6075 struct xfs_inode *ip,
6076 xfs_fileoff_t *next_fsb,
6077 xfs_fileoff_t offset_shift_fsb,
6079 xfs_fileoff_t stop_fsb,
6080 xfs_fsblock_t *firstblock,
6081 struct xfs_defer_ops *dfops,
6082 enum shift_direction direction,
6085 struct xfs_btree_cur *cur = NULL;
6086 struct xfs_bmbt_rec_host *gotp;
6087 struct xfs_bmbt_irec got;
6088 struct xfs_mount *mp = ip->i_mount;
6089 struct xfs_ifork *ifp;
6090 xfs_extnum_t nexts = 0;
6091 xfs_extnum_t current_ext;
6092 xfs_extnum_t total_extents;
6093 xfs_extnum_t stop_extent;
6095 int whichfork = XFS_DATA_FORK;
6098 if (unlikely(XFS_TEST_ERROR(
6099 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
6100 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
6101 mp, XFS_ERRTAG_BMAPIFORMAT))) {
6102 XFS_ERROR_REPORT("xfs_bmap_shift_extents",
6103 XFS_ERRLEVEL_LOW, mp);
6104 return -EFSCORRUPTED;
6107 if (XFS_FORCED_SHUTDOWN(mp))
6110 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
6111 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
6112 ASSERT(direction == SHIFT_LEFT || direction == SHIFT_RIGHT);
6113 ASSERT(*next_fsb != NULLFSBLOCK || direction == SHIFT_RIGHT);
6115 ifp = XFS_IFORK_PTR(ip, whichfork);
6116 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
6117 /* Read in all the extents */
6118 error = xfs_iread_extents(tp, ip, whichfork);
6123 if (ifp->if_flags & XFS_IFBROOT) {
6124 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
6125 cur->bc_private.b.firstblock = *firstblock;
6126 cur->bc_private.b.dfops = dfops;
6127 cur->bc_private.b.flags = 0;
6131 * There may be delalloc extents in the data fork before the range we
6132 * are collapsing out, so we cannot use the count of real extents here.
6133 * Instead we have to calculate it from the incore fork.
6135 total_extents = xfs_iext_count(ifp);
6136 if (total_extents == 0) {
6142 * In the case of the first right shift, we need to initialize next_fsb.
6144 if (*next_fsb == NULLFSBLOCK) {
6145 gotp = xfs_iext_get_ext(ifp, total_extents - 1);
6146 xfs_bmbt_get_all(gotp, &got);
6147 *next_fsb = got.br_startoff;
6148 if (stop_fsb > *next_fsb) {
6154 /* Lookup the extent index at which we have to stop */
6155 if (direction == SHIFT_RIGHT) {
6156 gotp = xfs_iext_bno_to_ext(ifp, stop_fsb, &stop_extent);
6157 /* Make stop_extent exclusive of shift range */
6160 stop_extent = total_extents;
6163 * Look up the extent index for the fsb where we start shifting. We can
6164 * henceforth iterate with current_ext as extent list changes are locked out via the ilock.
6167 * gotp can be null in 2 cases: 1) if there are no extents or 2)
6168 * *next_fsb lies in a hole beyond which there are no extents. Either way, we are done.
6171 gotp = xfs_iext_bno_to_ext(ifp, *next_fsb, &current_ext);
6177 /* some sanity checking before we finally start shifting extents */
6178 if ((direction == SHIFT_LEFT && current_ext >= stop_extent) ||
6179 (direction == SHIFT_RIGHT && current_ext <= stop_extent)) {
6184 while (nexts++ < num_exts) {
6185 error = xfs_bmse_shift_one(ip, whichfork, offset_shift_fsb,
6186 &current_ext, gotp, cur, &logflags,
6191 * If there was an extent merge during the shift, the extent
6192 * count can change. Update the total and grab the next record.
6194 if (direction == SHIFT_LEFT) {
6195 total_extents = xfs_iext_count(ifp);
6196 stop_extent = total_extents;
6199 if (current_ext == stop_extent) {
6201 *next_fsb = NULLFSBLOCK;
6204 gotp = xfs_iext_get_ext(ifp, current_ext);
6208 xfs_bmbt_get_all(gotp, &got);
6209 *next_fsb = got.br_startoff;
6214 xfs_btree_del_cursor(cur,
6215 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
6218 xfs_trans_log_inode(tp, ip, logflags);
6224 * Splits an extent into two extents at the block split_fsb, such that
6225 * split_fsb becomes the first block of the second extent. @current_ext
6226 * is the target extent to be split. @split_fsb is the block where the
6227 * extent is split. If split_fsb lies in a hole or at the first block of an extent, just return 0.
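/*
 * Worked example (illustrative): splitting an extent that maps file
 * blocks [10, 30) starting at disk block 200 at split_fsb = 18 shrinks
 * it to [10, 18) and inserts a new extent covering [18, 30) that starts
 * at disk block 208.
 */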
6230 xfs_bmap_split_extent_at(
6231 struct xfs_trans *tp,
6232 struct xfs_inode *ip,
6233 xfs_fileoff_t split_fsb,
6234 xfs_fsblock_t *firstfsb,
6235 struct xfs_defer_ops *dfops)
6237 int whichfork = XFS_DATA_FORK;
6238 struct xfs_btree_cur *cur = NULL;
6239 struct xfs_bmbt_rec_host *gotp;
6240 struct xfs_bmbt_irec got;
6241 struct xfs_bmbt_irec new; /* split extent */
6242 struct xfs_mount *mp = ip->i_mount;
6243 struct xfs_ifork *ifp;
6244 xfs_fsblock_t gotblkcnt; /* new block count for got */
6245 xfs_extnum_t current_ext;
6250 if (unlikely(XFS_TEST_ERROR(
6251 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
6252 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
6253 mp, XFS_ERRTAG_BMAPIFORMAT))) {
6254 XFS_ERROR_REPORT("xfs_bmap_split_extent_at",
6255 XFS_ERRLEVEL_LOW, mp);
6256 return -EFSCORRUPTED;
6259 if (XFS_FORCED_SHUTDOWN(mp))
6262 ifp = XFS_IFORK_PTR(ip, whichfork);
6263 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
6264 /* Read in all the extents */
6265 error = xfs_iread_extents(tp, ip, whichfork);
6271 * gotp can be null in 2 cases: 1) if there are no extents
6272 * or 2) split_fsb lies in a hole beyond which there are
6273 * no extents. Either way, we are done.
6275 gotp = xfs_iext_bno_to_ext(ifp, split_fsb, &current_ext);
6279 xfs_bmbt_get_all(gotp, &got);
6282 * Check if split_fsb lies in a hole or at the start boundary offset of the extent.
6285 if (got.br_startoff >= split_fsb)
6288 gotblkcnt = split_fsb - got.br_startoff;
6289 new.br_startoff = split_fsb;
6290 new.br_startblock = got.br_startblock + gotblkcnt;
6291 new.br_blockcount = got.br_blockcount - gotblkcnt;
6292 new.br_state = got.br_state;
6294 if (ifp->if_flags & XFS_IFBROOT) {
6295 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
6296 cur->bc_private.b.firstblock = *firstfsb;
6297 cur->bc_private.b.dfops = dfops;
6298 cur->bc_private.b.flags = 0;
6299 error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
6305 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
6308 xfs_bmbt_set_blockcount(gotp, gotblkcnt);
6309 got.br_blockcount = gotblkcnt;
6311 logflags = XFS_ILOG_CORE;
6313 error = xfs_bmbt_update(cur, got.br_startoff,
6320 logflags |= XFS_ILOG_DEXT;
6322 /* Add new extent */
6324 xfs_iext_insert(ip, current_ext, 1, &new, 0);
6325 XFS_IFORK_NEXT_SET(ip, whichfork,
6326 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
6329 error = xfs_bmbt_lookup_eq(cur, new.br_startoff,
6330 new.br_startblock, new.br_blockcount,
6334 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, del_cursor);
6335 cur->bc_rec.b.br_state = new.br_state;
6337 error = xfs_btree_insert(cur, &i);
6340 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
6344 * Convert to a btree if necessary.
6346 if (xfs_bmap_needs_btree(ip, whichfork)) {
6347 int tmp_logflags; /* partial log flag return val */
6349 ASSERT(cur == NULL);
6350 error = xfs_bmap_extents_to_btree(tp, ip, firstfsb, dfops,
6351 &cur, 0, &tmp_logflags, whichfork);
6352 logflags |= tmp_logflags;
6357 cur->bc_private.b.allocated = 0;
6358 xfs_btree_del_cursor(cur,
6359 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
6363 xfs_trans_log_inode(tp, ip, logflags);
6368 xfs_bmap_split_extent(
6369 struct xfs_inode *ip,
6370 xfs_fileoff_t split_fsb)
6372 struct xfs_mount *mp = ip->i_mount;
6373 struct xfs_trans *tp;
6374 struct xfs_defer_ops dfops;
6375 xfs_fsblock_t firstfsb;
6378 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
6379 XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
6383 xfs_ilock(ip, XFS_ILOCK_EXCL);
6384 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
6386 xfs_defer_init(&dfops, &firstfsb);
6388 error = xfs_bmap_split_extent_at(tp, ip, split_fsb,
6393 error = xfs_defer_finish(&tp, &dfops, NULL);
6397 return xfs_trans_commit(tp);
6400 xfs_defer_cancel(&dfops);
6401 xfs_trans_cancel(tp);
6405 /* Deferred mapping is only for real extents in the data fork. */
6407 xfs_bmap_is_update_needed(
6408 struct xfs_bmbt_irec *bmap)
6410 return bmap->br_startblock != HOLESTARTBLOCK &&
6411 bmap->br_startblock != DELAYSTARTBLOCK;
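/*
 * Put differently (illustrative): holes (HOLESTARTBLOCK) and delalloc
 * reservations (DELAYSTARTBLOCK) have no real disk mapping yet, so
 * there is nothing for a deferred map or unmap intent to operate on.
 */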
6414 /* Record a bmap intent. */
6417 struct xfs_mount *mp,
6418 struct xfs_defer_ops *dfops,
6419 enum xfs_bmap_intent_type type,
6420 struct xfs_inode *ip,
6422 struct xfs_bmbt_irec *bmap)
6425 struct xfs_bmap_intent *bi;
6427 trace_xfs_bmap_defer(mp,
6428 XFS_FSB_TO_AGNO(mp, bmap->br_startblock),
6430 XFS_FSB_TO_AGBNO(mp, bmap->br_startblock),
6431 ip->i_ino, whichfork,
6433 bmap->br_blockcount,
6436 bi = kmem_alloc(sizeof(struct xfs_bmap_intent), KM_SLEEP | KM_NOFS);
6437 INIT_LIST_HEAD(&bi->bi_list);
6440 bi->bi_whichfork = whichfork;
6441 bi->bi_bmap = *bmap;
6443 error = xfs_defer_join(dfops, bi->bi_owner);
6449 xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_BMAP, &bi->bi_list);
6453 /* Map an extent into a file. */
6455 xfs_bmap_map_extent(
6456 struct xfs_mount *mp,
6457 struct xfs_defer_ops *dfops,
6458 struct xfs_inode *ip,
6459 struct xfs_bmbt_irec *PREV)
6461 if (!xfs_bmap_is_update_needed(PREV))
6464 return __xfs_bmap_add(mp, dfops, XFS_BMAP_MAP, ip,
6465 XFS_DATA_FORK, PREV);
6468 /* Unmap an extent out of a file. */
6470 xfs_bmap_unmap_extent(
6471 struct xfs_mount *mp,
6472 struct xfs_defer_ops *dfops,
6473 struct xfs_inode *ip,
6474 struct xfs_bmbt_irec *PREV)
6476 if (!xfs_bmap_is_update_needed(PREV))
6479 return __xfs_bmap_add(mp, dfops, XFS_BMAP_UNMAP, ip,
6480 XFS_DATA_FORK, PREV);
6484 * Process one of the deferred bmap operations. We pass back the
6485 * btree cursor to maintain our lock on the bmapbt between calls.
6488 xfs_bmap_finish_one(
6489 struct xfs_trans *tp,
6490 struct xfs_defer_ops *dfops,
6491 struct xfs_inode *ip,
6492 enum xfs_bmap_intent_type type,
6494 xfs_fileoff_t startoff,
6495 xfs_fsblock_t startblock,
6496 xfs_filblks_t *blockcount,
6499 xfs_fsblock_t firstfsb;
6502 trace_xfs_bmap_deferred(tp->t_mountp,
6503 XFS_FSB_TO_AGNO(tp->t_mountp, startblock), type,
6504 XFS_FSB_TO_AGBNO(tp->t_mountp, startblock),
6505 ip->i_ino, whichfork, startoff, *blockcount, state);
6507 if (WARN_ON_ONCE(whichfork != XFS_DATA_FORK))
6508 return -EFSCORRUPTED;
6510 if (XFS_TEST_ERROR(false, tp->t_mountp,
6511 XFS_ERRTAG_BMAP_FINISH_ONE))
6516 error = xfs_bmapi_remap(tp, ip, startoff, *blockcount,
6520 case XFS_BMAP_UNMAP:
6521 error = __xfs_bunmapi(tp, ip, startoff, blockcount,
6522 XFS_BMAPI_REMAP, 1, &firstfsb, dfops);
6526 error = -EFSCORRUPTED;