// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2012 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_iomap.h"
#include "xfs_reflink.h"

/* Kernel only BMAP related definitions and functions */
/*
 * Convert the given file system block to a disk block. We have to treat it
 * differently based on whether the file is a real time file or not, because
 * the bmap code does.
 */
xfs_daddr_t
xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
{
	if (XFS_IS_REALTIME_INODE(ip))
		return XFS_FSB_TO_BB(ip->i_mount, fsb);
	return XFS_FSB_TO_DADDR(ip->i_mount, fsb);
}
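
/*
 * Worked example (illustrative sketch, not part of the original source):
 * with a 4096 byte block size, XFS_FSB_TO_BB() is a plain shift
 * (fsb << 3) into 512 byte basic blocks, which is the right answer for
 * the realtime device because rt block numbers are already linear.  A
 * data device fsb, in contrast, encodes an AG number and an AG-relative
 * block, so XFS_FSB_TO_DADDR() must decompose it first, roughly:
 *
 *	agno  = XFS_FSB_TO_AGNO(mp, fsb);
 *	agbno = XFS_FSB_TO_AGBNO(mp, fsb);
 *	daddr = XFS_AGB_TO_DADDR(mp, agno, agbno);
 */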
/*
 * Routine to zero an extent on disk allocated to the specific inode.
 *
 * The VFS functions take a linearised filesystem block offset, so we have to
 * convert the sparse xfs fsb to the right format first.
 * VFS types are real funky, too.
 */
int
xfs_zero_extent(
	struct xfs_inode	*ip,
	xfs_fsblock_t		start_fsb,
	xfs_off_t		count_fsb)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_buftarg	*target = xfs_inode_buftarg(ip);
	xfs_daddr_t		sector = xfs_fsb_to_db(ip, start_fsb);
	sector_t		block = XFS_BB_TO_FSBT(mp, sector);

	return blkdev_issue_zeroout(target->bt_bdev,
		block << (mp->m_super->s_blocksize_bits - 9),
		count_fsb << (mp->m_super->s_blocksize_bits - 9),
		GFP_NOFS, 0);
}
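
/*
 * The shifts above convert filesystem blocks into the 512 byte units that
 * blkdev_issue_zeroout() expects.  A sketch of the arithmetic, assuming a
 * 4096 byte block size (s_blocksize_bits == 12) purely for illustration:
 *
 *	sector_t start = block << (12 - 9);	 // fsb 100 -> sector 800
 *	sector_t nr = count_fsb << (12 - 9);	 // 8 sectors per block
 */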
#ifdef CONFIG_XFS_RT
int
xfs_bmap_rtalloc(
	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
{
	int		error;		/* error return value */
	xfs_mount_t	*mp;		/* mount point structure */
	xfs_extlen_t	prod = 0;	/* product factor for allocators */
	xfs_extlen_t	mod = 0;	/* product factor for allocators */
	xfs_extlen_t	ralen = 0;	/* realtime allocation length */
	xfs_extlen_t	align;		/* minimum allocation alignment */
	xfs_rtblock_t	rtb;

	mp = ap->ip->i_mount;
	align = xfs_get_extsz_hint(ap->ip);
	prod = align / mp->m_sb.sb_rextsize;
	error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
					align, 1, ap->eof, 0,
					ap->conv, &ap->offset, &ap->length);
	if (error)
		return error;
	ASSERT(ap->length);
	ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);

	/*
	 * If the offset & length are not perfectly aligned
	 * then kill prod, it will just get us in trouble.
	 */
	div_u64_rem(ap->offset, align, &mod);
	if (mod || ap->length % align)
		prod = 1;
	/*
	 * Set ralen to be the actual requested length in rtextents.
	 */
	ralen = ap->length / mp->m_sb.sb_rextsize;
	/*
	 * If the old value was close enough to MAXEXTLEN that
	 * we rounded up to it, cut it back so it's valid again.
	 * Note that if it's a really large request (bigger than
	 * MAXEXTLEN), we don't hear about that number, and can't
	 * adjust the starting point to match it.
	 */
	if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
		ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;

	/*
	 * Lock out modifications to both the RT bitmap and summary inodes
	 */
	xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
	xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
	xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
	xfs_trans_ijoin(ap->tp, mp->m_rsumip, XFS_ILOCK_EXCL);

	/*
	 * If it's an allocation to an empty file at offset 0,
	 * pick an extent that will space things out in the rt area.
	 */
	if (ap->eof && ap->offset == 0) {
		xfs_rtblock_t uninitialized_var(rtx); /* realtime extent no */

		error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
		if (error)
			return error;
		ap->blkno = rtx * mp->m_sb.sb_rextsize;
	} else {
		ap->blkno = 0;
	}

	xfs_bmap_adjacent(ap);

	/*
	 * Realtime allocation, done through xfs_rtallocate_extent.
	 */
	do_div(ap->blkno, mp->m_sb.sb_rextsize);
	rtb = ap->blkno;
	ap->length = ralen;
	error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length,
			&ralen, ap->wasdel, prod, &rtb);
	if (error)
		return error;

	ap->blkno = rtb;
	if (ap->blkno != NULLFSBLOCK) {
		ap->blkno *= mp->m_sb.sb_rextsize;
		ralen *= mp->m_sb.sb_rextsize;
		ap->length = ralen;
		ap->ip->i_d.di_nblocks += ralen;
		xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
		if (ap->wasdel)
			ap->ip->i_delayed_blks -= ralen;
		/*
		 * Adjust the disk quota also. This was reserved
		 * earlier.
		 */
		xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
			ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
					XFS_TRANS_DQ_RTBCOUNT, (long) ralen);

		/* Zero the extent if we were asked to do so */
		if (ap->datatype & XFS_ALLOC_USERDATA_ZERO) {
			error = xfs_zero_extent(ap->ip, ap->blkno, ap->length);
			if (error)
				return error;
		}
	} else {
		ap->length = 0;
	}
	return 0;
}
#endif /* CONFIG_XFS_RT */
/*
 * Check if the endoff is outside the last extent. If so the caller will grow
 * the allocation to a stripe unit boundary. All offsets are considered outside
 * the end of file for an empty fork, so 1 is returned in *eof in that case.
 */
int
xfs_bmap_eof(
	struct xfs_inode	*ip,
	xfs_fileoff_t		endoff,
	int			whichfork,
	int			*eof)
{
	struct xfs_bmbt_irec	rec;
	int			error;

	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, eof);
	if (error || *eof)
		return error;

	*eof = endoff >= rec.br_startoff + rec.br_blockcount;
	return 0;
}
/*
 * Extent tree block counting routines.
 */

/*
 * Count leaf blocks given a range of extent records. Delayed allocation
 * extents are not counted towards the totals.
 */
xfs_extnum_t
xfs_bmap_count_leaves(
	struct xfs_ifork	*ifp,
	xfs_filblks_t		*count)
{
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	got;
	xfs_extnum_t		numrecs = 0;

	for_each_xfs_iext(ifp, &icur, &got) {
		if (!isnullstartblock(got.br_startblock)) {
			*count += got.br_blockcount;
			numrecs++;
		}
	}

	return numrecs;
}
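
/*
 * Usage sketch (hypothetical caller, for illustration only): counting the
 * allocated blocks and extent records of an in-core data fork.  Delalloc
 * extents are skipped by the loop above, so @blocks reflects real,
 * on-disk allocations only:
 *
 *	xfs_filblks_t	blocks = 0;
 *	xfs_extnum_t	nr;
 *
 *	nr = xfs_bmap_count_leaves(XFS_IFORK_PTR(ip, XFS_DATA_FORK),
 *			&blocks);
 */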
/*
 * Count fsblocks of the given fork. Delayed allocation extents are
 * not counted towards the totals.
 */
int
xfs_bmap_count_blocks(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_extnum_t		*nextents,
	xfs_filblks_t		*count)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_btree_cur	*cur;
	xfs_extlen_t		btblocks = 0;
	int			error;

	*nextents = 0;
	*count = 0;

	if (!ifp)
		return 0;

	switch (XFS_IFORK_FORMAT(ip, whichfork)) {
	case XFS_DINODE_FMT_BTREE:
		if (!(ifp->if_flags & XFS_IFEXTENTS)) {
			error = xfs_iread_extents(tp, ip, whichfork);
			if (error)
				return error;
		}

		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		error = xfs_btree_count_blocks(cur, &btblocks);
		xfs_btree_del_cursor(cur, error);
		if (error)
			return error;

		/*
		 * xfs_btree_count_blocks includes the root block contained in
		 * the inode fork in @btblocks, so subtract one because we're
		 * only interested in allocated disk blocks.
		 */
		*count += btblocks - 1;
		/* fall through */
	case XFS_DINODE_FMT_EXTENTS:
		*nextents = xfs_bmap_count_leaves(ifp, count);
		break;
	}

	return 0;
}
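
/*
 * Usage sketch (illustrative, not from the original file): callers such
 * as the attribute fork accounting in xfs_swap_extent_forks() below use
 * this helper to learn both the record count and the block count of a
 * fork in one pass:
 *
 *	xfs_extnum_t	nextents;
 *	xfs_filblks_t	count;
 *	int		error;
 *
 *	error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &nextents,
 *			&count);
 */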
static int
xfs_getbmap_report_one(
	struct xfs_inode	*ip,
	struct getbmapx		*bmv,
	struct kgetbmap		*out,
	int64_t			bmv_end,
	struct xfs_bmbt_irec	*got)
{
	struct kgetbmap		*p = out + bmv->bmv_entries;
	bool			shared = false;
	int			error;

	error = xfs_reflink_trim_around_shared(ip, got, &shared);
	if (error)
		return error;

	if (isnullstartblock(got->br_startblock) ||
	    got->br_startblock == DELAYSTARTBLOCK) {
		/*
		 * Delalloc extents that start beyond EOF can occur due to
		 * speculative EOF allocation when the delalloc extent is larger
		 * than the largest freespace extent at conversion time. These
		 * extents cannot be converted by data writeback, so can exist
		 * here even if we are not supposed to be finding delalloc
		 * extents.
		 */
		if (got->br_startoff < XFS_B_TO_FSB(ip->i_mount, XFS_ISIZE(ip)))
			ASSERT((bmv->bmv_iflags & BMV_IF_DELALLOC) != 0);

		p->bmv_oflags |= BMV_OF_DELALLOC;
		p->bmv_block = -2;
	} else {
		p->bmv_block = xfs_fsb_to_db(ip, got->br_startblock);
	}

	if (got->br_state == XFS_EXT_UNWRITTEN &&
	    (bmv->bmv_iflags & BMV_IF_PREALLOC))
		p->bmv_oflags |= BMV_OF_PREALLOC;

	if (shared)
		p->bmv_oflags |= BMV_OF_SHARED;

	p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, got->br_startoff);
	p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, got->br_blockcount);

	bmv->bmv_offset = p->bmv_offset + p->bmv_length;
	bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
	bmv->bmv_entries++;
	return 0;
}
static void
xfs_getbmap_report_hole(
	struct xfs_inode	*ip,
	struct getbmapx		*bmv,
	struct kgetbmap		*out,
	int64_t			bmv_end,
	xfs_fileoff_t		bno,
	xfs_fileoff_t		end)
{
	struct kgetbmap		*p = out + bmv->bmv_entries;

	if (bmv->bmv_iflags & BMV_IF_NO_HOLES)
		return;

	p->bmv_block = -1;
	p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, bno);
	p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, end - bno);

	bmv->bmv_offset = p->bmv_offset + p->bmv_length;
	bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
	bmv->bmv_entries++;
}
static inline bool
xfs_getbmap_full(
	struct getbmapx		*bmv)
{
	return bmv->bmv_length == 0 || bmv->bmv_entries >= bmv->bmv_count - 1;
}
static bool
xfs_getbmap_next_rec(
	struct xfs_bmbt_irec	*rec,
	xfs_fileoff_t		total_end)
{
	xfs_fileoff_t		end = rec->br_startoff + rec->br_blockcount;

	if (end == total_end)
		return false;

	rec->br_startoff += rec->br_blockcount;
	if (!isnullstartblock(rec->br_startblock) &&
	    rec->br_startblock != DELAYSTARTBLOCK)
		rec->br_startblock += rec->br_blockcount;
	rec->br_blockcount = total_end - end;
	return true;
}
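
/*
 * Example of how the helpers above cooperate (values invented for
 * illustration): a single 10-block bmbt record starting at file offset 0
 * whose first 4 blocks are shared is reported as two getbmapx records.
 * xfs_reflink_trim_around_shared() trims the mapping to the shared part,
 * xfs_getbmap_report_one() emits it, and xfs_getbmap_next_rec() advances
 * br_startoff/br_startblock by 4 and shrinks br_blockcount to 6 so the
 * unshared tail is reported on the next pass.
 */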
/*
 * Get inode's extents as described in bmv, and format for output.
 * Calls formatter to fill the user's buffer until all extents
 * are mapped, until the passed-in bmv->bmv_count slots have
 * been filled, or until the formatter short-circuits the loop,
 * if it is tracking filled-in extents on its own.
 */
int						/* error code */
xfs_getbmap(
	struct xfs_inode	*ip,
	struct getbmapx		*bmv,		/* user bmap structure */
	struct kgetbmap		*out)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			iflags = bmv->bmv_iflags;
	int			whichfork, lock, error = 0;
	int64_t			bmv_end, max_len;
	xfs_fileoff_t		bno, first_bno;
	struct xfs_ifork	*ifp;
	struct xfs_bmbt_irec	got, rec;
	xfs_filblks_t		len;
	struct xfs_iext_cursor	icur;

	if (bmv->bmv_iflags & ~BMV_IF_VALID)
		return -EINVAL;
#ifndef DEBUG
	/* Only allow CoW fork queries if we're debugging. */
	if (iflags & BMV_IF_COWFORK)
		return -EINVAL;
#endif
	if ((iflags & BMV_IF_ATTRFORK) && (iflags & BMV_IF_COWFORK))
		return -EINVAL;

	if (bmv->bmv_length < -1)
		return -EINVAL;
	bmv->bmv_entries = 0;
	if (bmv->bmv_length == 0)
		return 0;

	if (iflags & BMV_IF_ATTRFORK)
		whichfork = XFS_ATTR_FORK;
	else if (iflags & BMV_IF_COWFORK)
		whichfork = XFS_COW_FORK;
	else
		whichfork = XFS_DATA_FORK;
	ifp = XFS_IFORK_PTR(ip, whichfork);

	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	switch (whichfork) {
	case XFS_ATTR_FORK:
		if (!XFS_IFORK_Q(ip))
			goto out_unlock_iolock;

		max_len = 1LL << 32;
		lock = xfs_ilock_attr_map_shared(ip);
		break;
	case XFS_COW_FORK:
		/* No CoW fork? Just return */
		if (!ifp)
			goto out_unlock_iolock;

		if (xfs_get_cowextsz_hint(ip))
			max_len = mp->m_super->s_maxbytes;
		else
			max_len = XFS_ISIZE(ip);

		lock = XFS_ILOCK_SHARED;
		xfs_ilock(ip, lock);
		break;
	case XFS_DATA_FORK:
		if (!(iflags & BMV_IF_DELALLOC) &&
		    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size)) {
			error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
			if (error)
				goto out_unlock_iolock;

			/*
			 * Even after flushing the inode, there can still be
			 * delalloc blocks on the inode beyond EOF due to
			 * speculative preallocation. These are not removed
			 * until the release function is called or the inode
			 * is inactivated. Hence we cannot assert here that
			 * ip->i_delayed_blks == 0.
			 */
		}

		if (xfs_get_extsz_hint(ip) ||
		    (ip->i_d.di_flags &
		     (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))
			max_len = mp->m_super->s_maxbytes;
		else
			max_len = XFS_ISIZE(ip);

		lock = xfs_ilock_data_map_shared(ip);
		break;
	}

	switch (XFS_IFORK_FORMAT(ip, whichfork)) {
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		break;
	case XFS_DINODE_FMT_LOCAL:
		/* Local format inode forks report no extents. */
		goto out_unlock_ilock;
	default:
		error = -EINVAL;
		goto out_unlock_ilock;
	}

	if (bmv->bmv_length == -1) {
		max_len = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, max_len));
		bmv->bmv_length = max(0LL, max_len - bmv->bmv_offset);
	}

	bmv_end = bmv->bmv_offset + bmv->bmv_length;

	first_bno = bno = XFS_BB_TO_FSBT(mp, bmv->bmv_offset);
	len = XFS_BB_TO_FSB(mp, bmv->bmv_length);

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(NULL, ip, whichfork);
		if (error)
			goto out_unlock_ilock;
	}

	if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
		/*
		 * Report a whole-file hole if the delalloc flag is set to
		 * stay compatible with the old implementation.
		 */
		if (iflags & BMV_IF_DELALLOC)
			xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
					XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
		goto out_unlock_ilock;
	}

	while (!xfs_getbmap_full(bmv)) {
		xfs_trim_extent(&got, first_bno, len);

		/*
		 * Report an entry for a hole if this extent doesn't directly
		 * follow the previous one.
		 */
		if (got.br_startoff > bno) {
			xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
					got.br_startoff);
			if (xfs_getbmap_full(bmv))
				break;
		}

		/*
		 * In order to report shared extents accurately, we report each
		 * distinct shared / unshared part of a single bmbt record with
		 * an individual getbmapx record.
		 */
		bno = got.br_startoff + got.br_blockcount;
		rec = got;
		do {
			error = xfs_getbmap_report_one(ip, bmv, out, bmv_end,
					&rec);
			if (error || xfs_getbmap_full(bmv))
				goto out_unlock_ilock;
		} while (xfs_getbmap_next_rec(&rec, bno));

		if (!xfs_iext_next_extent(ifp, &icur, &got)) {
			xfs_fileoff_t	end = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));

			out[bmv->bmv_entries - 1].bmv_oflags |= BMV_OF_LAST;

			if (whichfork != XFS_ATTR_FORK && bno < end &&
			    !xfs_getbmap_full(bmv)) {
				xfs_getbmap_report_hole(ip, bmv, out, bmv_end,
						bno, end);
			}
			break;
		}

		if (bno >= first_bno + len)
			break;
	}

out_unlock_ilock:
	xfs_iunlock(ip, lock);
out_unlock_iolock:
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return error;
}
/*
 * Dead simple method of punching delayed allocation blocks from a range in
 * the inode. This will always punch out both the start and end blocks, even
 * if the ranges only partially overlap them, so it is up to the caller to
 * ensure that partial blocks are not passed in.
 */
int
xfs_bmap_punch_delalloc_range(
	struct xfs_inode	*ip,
	xfs_fileoff_t		start_fsb,
	xfs_fileoff_t		length)
{
	struct xfs_ifork	*ifp = &ip->i_df;
	xfs_fileoff_t		end_fsb = start_fsb + length;
	struct xfs_bmbt_irec	got, del;
	struct xfs_iext_cursor	icur;
	int			error = 0;

	ASSERT(ifp->if_flags & XFS_IFEXTENTS);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
		goto out_unlock;

	while (got.br_startoff + got.br_blockcount > start_fsb) {
		del = got;
		xfs_trim_extent(&del, start_fsb, length);

		/*
		 * A delete can push the cursor forward. Step back to the
		 * previous extent on non-delalloc or extents outside the
		 * target range.
		 */
		if (!del.br_blockcount ||
		    !isnullstartblock(del.br_startblock)) {
			if (!xfs_iext_prev_extent(ifp, &icur, &got))
				break;
			continue;
		}

		error = xfs_bmap_del_extent_delay(ip, XFS_DATA_FORK, &icur,
						  &got, &del);
		if (error || !xfs_iext_get_extent(ifp, &icur, &got))
			break;
	}

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
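
/*
 * Usage sketch (hypothetical, for illustration only): writeback failure
 * paths punch out the delalloc blocks that will never reach disk, e.g.
 * for a single filesystem block:
 *
 *	error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
 *
 * Note the range is expressed in filesystem blocks, and partial blocks
 * must not be passed in, as the comment above explains.
 */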
/*
 * Test whether it is appropriate to check an inode for and free post EOF
 * blocks. The 'force' parameter determines whether we should also consider
 * regular files that are marked preallocated or append-only.
 */
bool
xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
{
	/* prealloc/delalloc exists only on regular files */
	if (!S_ISREG(VFS_I(ip)->i_mode))
		return false;

	/*
	 * Zero sized files with no cached pages and delalloc blocks will not
	 * have speculative prealloc/delalloc blocks to remove.
	 */
	if (VFS_I(ip)->i_size == 0 &&
	    VFS_I(ip)->i_mapping->nrpages == 0 &&
	    ip->i_delayed_blks == 0)
		return false;

	/* If we haven't read in the extent list, then don't do it now. */
	if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
		return false;

	/*
	 * Do not free real preallocated or append-only files unless the file
	 * has delalloc blocks and we are forced to remove them.
	 */
	if (ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
		if (!force || ip->i_delayed_blks == 0)
			return false;

	return true;
}
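
/*
 * Usage sketch (illustrative, not from the original file): the
 * check-then-free pattern used by callers, with force == false for
 * opportunistic trims on last close and force == true when post-EOF
 * preallocation must not leak into the file, as in xfs_prepare_shift()
 * below:
 *
 *	if (xfs_can_free_eofblocks(ip, false))
 *		error = xfs_free_eofblocks(ip);
 */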
/*
 * This is called to free any blocks beyond eof. The caller must hold
 * IOLOCK_EXCL unless we are in the inode reclaim path and have the only
 * reference to the inode.
 */
int
xfs_free_eofblocks(
	struct xfs_inode	*ip)
{
	struct xfs_trans	*tp;
	int			error;
	xfs_fileoff_t		end_fsb;
	xfs_fileoff_t		last_fsb;
	xfs_filblks_t		map_len;
	int			nimaps;
	struct xfs_bmbt_irec	imap;
	struct xfs_mount	*mp = ip->i_mount;

	/*
	 * Figure out if there are any blocks beyond the end
	 * of the file. If not, then there is nothing to do.
	 */
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
	last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	if (last_fsb <= end_fsb)
		return 0;
	map_len = last_fsb - end_fsb;

	nimaps = 1;
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	error = xfs_bmapi_read(ip, end_fsb, map_len, &imap, &nimaps, 0);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	/*
	 * If there are blocks after the end of file, truncate the file to its
	 * current size to free them up.
	 */
	if (!error && (nimaps != 0) &&
	    (imap.br_startblock != HOLESTARTBLOCK ||
	     ip->i_delayed_blks)) {
		/*
		 * Attach the dquots to the inode up front.
		 */
		error = xfs_qm_dqattach(ip);
		if (error)
			return error;

		/* wait on dio to ensure i_size has settled */
		inode_dio_wait(VFS_I(ip));

		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0,
				&tp);
		if (error) {
			ASSERT(XFS_FORCED_SHUTDOWN(mp));
			return error;
		}

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, 0);

		/*
		 * Do not update the on-disk file size. If we update the
		 * on-disk file size and then the system crashes before the
		 * contents of the file are flushed to disk then the files
		 * may be full of holes (ie NULL files bug).
		 */
		error = xfs_itruncate_extents_flags(&tp, ip, XFS_DATA_FORK,
					XFS_ISIZE(ip), XFS_BMAPI_NODISCARD);
		if (error) {
			/*
			 * If we get an error at this point we simply don't
			 * bother truncating the file.
			 */
			xfs_trans_cancel(tp);
		} else {
			error = xfs_trans_commit(tp);
			if (!error)
				xfs_inode_clear_eofblocks_tag(ip);
		}

		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}
	return error;
}
int
xfs_alloc_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len,
	int			alloc_type)
{
	xfs_mount_t		*mp = ip->i_mount;
	xfs_off_t		count;
	xfs_filblks_t		allocated_fsb;
	xfs_filblks_t		allocatesize_fsb;
	xfs_extlen_t		extsz, temp;
	xfs_fileoff_t		startoffset_fsb;
	xfs_fileoff_t		endoffset_fsb;
	int			nimaps;
	int			quota_flag;
	int			rt;
	xfs_trans_t		*tp;
	xfs_bmbt_irec_t		imaps[1], *imapp;
	uint			qblocks, resblks, resrtextents;
	int			error;

	trace_xfs_alloc_file_space(ip);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	if (len <= 0)
		return -EINVAL;

	rt = XFS_IS_REALTIME_INODE(ip);
	extsz = xfs_get_extsz_hint(ip);

	count = len;
	imapp = &imaps[0];
	nimaps = 1;
	startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
	endoffset_fsb = XFS_B_TO_FSB(mp, offset + count);
	allocatesize_fsb = endoffset_fsb - startoffset_fsb;

	/*
	 * Allocate file space until done or until there is an error
	 */
	while (allocatesize_fsb && !error) {
		xfs_fileoff_t	s, e;

		/*
		 * Determine space reservations for data/realtime.
		 */
		if (unlikely(extsz)) {
			s = startoffset_fsb;
			do_div(s, extsz);
			s *= extsz;
			e = startoffset_fsb + allocatesize_fsb;
			div_u64_rem(startoffset_fsb, extsz, &temp);
			if (temp)
				e += temp;
			div_u64_rem(e, extsz, &temp);
			if (temp)
				e += extsz - temp;
		} else {
			s = 0;
			e = allocatesize_fsb;
		}

		/*
		 * The transaction reservation is limited to a 32-bit block
		 * count, hence we need to limit the number of blocks we are
		 * trying to reserve to avoid an overflow. We can't allocate
		 * more than @nimaps extents, and an extent is limited on disk
		 * to MAXEXTLEN (21 bits), so use that to enforce the limit.
		 */
		resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
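		/*
		 * Worked numbers (illustrative only): with nimaps == 1 and
		 * MAXEXTLEN == (1 << 21) - 1 blocks, a request for 16
		 * million blocks is clamped to roughly 2 million blocks per
		 * iteration, and the surrounding loop simply comes around
		 * again until allocatesize_fsb is exhausted. The clamp keeps
		 * the 32-bit reservation arithmetic below from overflowing.
		 */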
		if (unlikely(rt)) {
			resrtextents = qblocks = resblks;
			resrtextents /= mp->m_sb.sb_rextsize;
			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
			quota_flag = XFS_QMOPT_RES_RTBLKS;
		} else {
			resrtextents = 0;
			resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
			quota_flag = XFS_QMOPT_RES_REGBLKS;
		}

		/*
		 * Allocate and setup the transaction.
		 */
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks,
				resrtextents, 0, &tp);

		/*
		 * Check for running out of space
		 */
		if (error) {
			/*
			 * Free the transaction structure.
			 */
			ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
			break;
		}
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks,
						      0, quota_flag);
		if (error)
			goto error1;

		xfs_trans_ijoin(tp, ip, 0);

		error = xfs_bmapi_write(tp, ip, startoffset_fsb,
					allocatesize_fsb, alloc_type, 0, imapp,
					&nimaps);
		if (error)
			goto error0;

		/*
		 * Complete the transaction
		 */
		error = xfs_trans_commit(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			break;

		allocated_fsb = imapp->br_blockcount;

		if (nimaps == 0) {
			error = -ENOSPC;
			break;
		}

		startoffset_fsb += allocated_fsb;
		allocatesize_fsb -= allocated_fsb;
	}

	return error;

error0:	/* unlock inode, unreserve quota blocks, cancel trans */
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);

error1:	/* Just cancel transaction */
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
static int
xfs_unmap_extent(
	struct xfs_inode	*ip,
	xfs_fileoff_t		startoffset_fsb,
	xfs_filblks_t		len_fsb,
	int			*done)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	uint			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
	if (error) {
		ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot, ip->i_gdquot,
			ip->i_pdquot, resblks, 0, XFS_QMOPT_RES_REGBLKS);
	if (error)
		goto out_trans_cancel;

	xfs_trans_ijoin(tp, ip, 0);

	error = xfs_bunmapi(tp, ip, startoffset_fsb, len_fsb, 0, 2, done);
	if (error)
		goto out_trans_cancel;

	error = xfs_trans_commit(tp);
out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock;
}
int
xfs_flush_unmap_range(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct inode		*inode = VFS_I(ip);
	xfs_off_t		rounding, start, end;
	int			error;

	/* wait for the completion of any pending DIOs */
	inode_dio_wait(inode);

	rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_SIZE);
	start = round_down(offset, rounding);
	end = round_up(offset + len, rounding) - 1;

	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (error)
		return error;
	truncate_pagecache_range(inode, start, end);
	return 0;
}
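
/*
 * Rounding sketch (values assumed purely for illustration): with 4096
 * byte blocks and 4096 byte pages, rounding == 4096, so a call with
 * offset == 5000 and len == 1000 writes back and invalidates the page
 * cache over [4096, 8191]. Taking the larger of the block size and the
 * page size guarantees whole blocks and whole pages are dropped together.
 */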
int
xfs_free_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		startoffset_fsb;
	xfs_fileoff_t		endoffset_fsb;
	int			done = 0, error;

	trace_xfs_free_file_space(ip);

	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	if (len <= 0)	/* if nothing being freed */
		return 0;

	error = xfs_flush_unmap_range(ip, offset, len);
	if (error)
		return error;

	startoffset_fsb = XFS_B_TO_FSB(mp, offset);
	endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);

	/*
	 * Need to zero the stuff we're not freeing, on disk.
	 */
	if (endoffset_fsb > startoffset_fsb) {
		while (!done) {
			error = xfs_unmap_extent(ip, startoffset_fsb,
					endoffset_fsb - startoffset_fsb, &done);
			if (error)
				return error;
		}
	}

	/*
	 * Now that we've unmapped all full blocks we'll have to zero out any
	 * partial block at the beginning and/or end. iomap_zero_range is smart
	 * enough to skip any holes, including those we just created, but we
	 * must take care not to zero beyond EOF and enlarge i_size.
	 */
	if (offset >= XFS_ISIZE(ip))
		return 0;
	if (offset + len > XFS_ISIZE(ip))
		len = XFS_ISIZE(ip) - offset;
	error = iomap_zero_range(VFS_I(ip), offset, len, NULL,
			&xfs_buffered_write_iomap_ops);
	if (error)
		return error;

	/*
	 * If we zeroed right up to EOF and EOF straddles a page boundary we
	 * must make sure that the post-EOF area is also zeroed because the
	 * page could be mmap'd and iomap_zero_range doesn't do that for us.
	 * Writeback of the eof page will do this, albeit clumsily.
	 */
	if (offset + len >= XFS_ISIZE(ip) && offset_in_page(offset + len) > 0) {
		error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
				round_down(offset + len, PAGE_SIZE), LLONG_MAX);
	}

	return error;
}
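
/*
 * Usage sketch (hypothetical caller, mirroring the fallocate punch-hole
 * path): the caller is expected to hold the IOLOCK and MMAPLOCK
 * exclusively, then simply:
 *
 *	error = xfs_free_file_space(ip, offset, len);
 *
 * Full blocks inside the range are unmapped; the partial blocks at each
 * edge are zeroed on disk rather than freed, as described above.
 */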
static int
xfs_prepare_shift(
	struct xfs_inode	*ip,
	loff_t			offset)
{
	int			error;

	/*
	 * Trim eofblocks to avoid shifting uninitialized post-eof preallocation
	 * into the accessible region of the file.
	 */
	if (xfs_can_free_eofblocks(ip, true)) {
		error = xfs_free_eofblocks(ip);
		if (error)
			return error;
	}

	/*
	 * Writeback and invalidate cache for the remainder of the file as we're
	 * about to shift down every extent from offset to EOF.
	 */
	error = xfs_flush_unmap_range(ip, offset, XFS_ISIZE(ip));
	if (error)
		return error;

	/*
	 * Clean out anything hanging around in the cow fork now that
	 * we've flushed all the dirty data out to disk to avoid having
	 * CoW extents at the wrong offsets.
	 */
	if (xfs_inode_has_cow_data(ip)) {
		error = xfs_reflink_cancel_cow_range(ip, offset, NULLFILEOFF,
				true);
		if (error)
			return error;
	}

	return 0;
}
/*
 * xfs_collapse_file_space()
 *	This routine frees disk space and shifts extents for the given file.
 *	The first thing we do is free the data blocks in the specified range
 *	by calling xfs_free_file_space(), which also syncs dirty data and
 *	invalidates the page cache over the region on which the collapse
 *	range is working. Then we shift the extent records to the left to
 *	cover the hole.
 * RETURNS:
 *	0 on success
 *	errno on error
 */
int
xfs_collapse_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;
	xfs_fileoff_t		next_fsb = XFS_B_TO_FSB(mp, offset + len);
	xfs_fileoff_t		shift_fsb = XFS_B_TO_FSB(mp, len);
	uint			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
	bool			done = false;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));

	trace_xfs_collapse_file_space(ip);

	error = xfs_free_file_space(ip, offset, len);
	if (error)
		return error;

	error = xfs_prepare_shift(ip, offset);
	if (error)
		return error;

	while (!error && !done) {
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0,
					&tp);
		if (error)
			break;

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot,
				ip->i_gdquot, ip->i_pdquot, resblks, 0,
				XFS_QMOPT_RES_REGBLKS);
		if (error)
			goto out_trans_cancel;
		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

		error = xfs_bmap_collapse_extents(tp, ip, &next_fsb, shift_fsb,
				&done);
		if (error)
			goto out_trans_cancel;

		error = xfs_trans_commit(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}

	return error;

out_trans_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
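
/*
 * Semantics sketch (invented offsets, for illustration only): collapsing
 * [4096, 8192) in a 16k file frees that range and shifts the data that
 * lived at [8192, 16384) down to [4096, 12288), leaving a 12k file.
 * Each loop iteration above commits one transaction that moves extent
 * records left by shift_fsb blocks.
 */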
/*
 * xfs_insert_file_space()
 *	This routine creates hole space by shifting extents for the given
 *	file. The first thing we do is sync dirty data and invalidate the
 *	page cache over the region on which the insert range is working.
 *	Then an extent is split into two at the given offset by calling
 *	xfs_bmap_split_extent, and all the extent records lying between
 *	[offset, last allocated extent] are shifted to the right to make
 *	room for the hole.
 * RETURNS:
 *	0 on success
 *	errno on error
 */
int
xfs_insert_file_space(
	struct xfs_inode	*ip,
	loff_t			offset,
	loff_t			len)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;
	xfs_fileoff_t		stop_fsb = XFS_B_TO_FSB(mp, offset);
	xfs_fileoff_t		next_fsb = NULLFSBLOCK;
	xfs_fileoff_t		shift_fsb = XFS_B_TO_FSB(mp, len);
	bool			done = false;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));

	trace_xfs_insert_file_space(ip);

	error = xfs_bmap_can_insert_extents(ip, stop_fsb, shift_fsb);
	if (error)
		return error;

	error = xfs_prepare_shift(ip, offset);
	if (error)
		return error;

	/*
	 * The extent shifting code works on extent granularity. So, if stop_fsb
	 * is not the starting block of an extent, we need to split the extent
	 * at stop_fsb.
	 */
	error = xfs_bmap_split_extent(ip, stop_fsb);
	if (error)
		return error;

	while (!error && !done) {
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0,
					&tp);
		if (error)
			break;

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
		error = xfs_bmap_insert_extents(tp, ip, &next_fsb, shift_fsb,
				&done, stop_fsb);
		if (error)
			goto out_trans_cancel;

		error = xfs_trans_commit(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}

	return error;

out_trans_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
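
/*
 * Semantics sketch (invented offsets, for illustration only): inserting
 * 4k at offset 4096 in a 12k file shifts the extents backing
 * [4096, 12288) right to [8192, 16384), leaving a hole at [4096, 8192);
 * the caller grows i_size by len beforehand. The split at stop_fsb above
 * ensures the shift starts on an extent boundary.
 */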
/*
 * We need to check that the format of the data fork in the temporary inode is
 * valid for the target inode before doing the swap. This is not a problem with
 * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
 * data fork depending on the space the attribute fork is taking so we can get
 * invalid formats on the target inode.
 *
 * E.g. target has space for 7 extents in extent format, temp inode only has
 * space for 6. If we defragment down to 7 extents, then the tmp format is a
 * btree, but when swapped it needs to be in extent format. Hence we can't just
 * blindly swap data forks on attr2 filesystems.
 *
 * Note that we check the swap in both directions so that we don't end up with
 * a corrupt temporary inode, either.
 *
 * Note that fixing the way xfs_fsr sets up the attribute fork in the source
 * inode will prevent this situation from occurring, so all we do here is
 * reject and log the attempt. Basically we are putting the responsibility on
 * userspace to get this right.
 */
static int
xfs_swap_extents_check_format(
	struct xfs_inode	*ip,	/* target inode */
	struct xfs_inode	*tip)	/* tmp inode */
{
	/* Should never get a local format */
	if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
	    tip->i_d.di_format == XFS_DINODE_FMT_LOCAL)
		return -EINVAL;

	/*
	 * If the target inode has fewer extents than the temporary inode then
	 * why did userspace call us?
	 */
	if (ip->i_d.di_nextents < tip->i_d.di_nextents)
		return -EINVAL;

	/*
	 * If we have to use the (expensive) rmap swap method, we can
	 * handle any number of extents and any format.
	 */
	if (xfs_sb_version_hasrmapbt(&ip->i_mount->m_sb))
		return 0;

	/*
	 * If the target inode is in extent form and the temp inode is in btree
	 * form then we will end up with the target inode in the wrong format
	 * as we already know there are fewer extents in the temp inode.
	 */
	if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
	    tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
		return -EINVAL;

	/* Check temp in extent form to max in target */
	if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
	    XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) >
			XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
		return -EINVAL;

	/* Check target in extent form to max in temp */
	if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
	    XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) >
			XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
		return -EINVAL;

	/*
	 * If we are in a btree format, check that the temp root block will fit
	 * in the target and that it has enough extents to be in btree format
	 * in the target.
	 *
	 * Note that we have to be careful to allow btree->extent conversions
	 * (a common defrag case) which will occur when the temp inode is in
	 * extent format...
	 */
	if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
		if (XFS_IFORK_Q(ip) &&
		    XFS_BMAP_BMDR_SPACE(tip->i_df.if_broot) > XFS_IFORK_BOFF(ip))
			return -EINVAL;
		if (XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <=
		    XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
			return -EINVAL;
	}

	/* Reciprocal target->temp btree format checks */
	if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
		if (XFS_IFORK_Q(tip) &&
		    XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
			return -EINVAL;
		if (XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <=
		    XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
			return -EINVAL;
	}

	return 0;
}
static int
xfs_swap_extent_flush(
	struct xfs_inode	*ip)
{
	int	error;

	error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
	if (error)
		return error;
	truncate_pagecache_range(VFS_I(ip), 0, -1);

	/* Verify O_DIRECT for ftmp */
	if (VFS_I(ip)->i_mapping->nrpages)
		return -EINVAL;
	return 0;
}
/*
 * Move extents from one file to another, when rmap is enabled.
 */
STATIC int
xfs_swap_extent_rmap(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip,
	struct xfs_inode	*tip)
{
	struct xfs_trans	*tp = *tpp;
	struct xfs_bmbt_irec	irec;
	struct xfs_bmbt_irec	uirec;
	struct xfs_bmbt_irec	tirec;
	xfs_fileoff_t		offset_fsb;
	xfs_fileoff_t		end_fsb;
	xfs_filblks_t		count_fsb;
	int			error;
	xfs_filblks_t		ilen;
	xfs_filblks_t		rlen;
	int			nimaps;
	uint64_t		tip_flags2;

	/*
	 * If the source file has shared blocks, we must flag the donor
	 * file as having shared blocks so that we get the shared-block
	 * rmap functions when we go to fix up the rmaps. The flags
	 * will be switched for real later.
	 */
	tip_flags2 = tip->i_d.di_flags2;
	if (ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK)
		tip->i_d.di_flags2 |= XFS_DIFLAG2_REFLINK;

	offset_fsb = 0;
	end_fsb = XFS_B_TO_FSB(ip->i_mount, i_size_read(VFS_I(ip)));
	count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);

	while (count_fsb) {
		/* Read extent from the donor file */
		nimaps = 1;
		error = xfs_bmapi_read(tip, offset_fsb, count_fsb, &tirec,
				&nimaps, 0);
		if (error)
			goto out;
		ASSERT(nimaps == 1);
		ASSERT(tirec.br_startblock != DELAYSTARTBLOCK);

		trace_xfs_swap_extent_rmap_remap(tip, &tirec);
		ilen = tirec.br_blockcount;

		/* Unmap the old blocks in the source file. */
		while (tirec.br_blockcount) {
			ASSERT(tp->t_firstblock == NULLFSBLOCK);
			trace_xfs_swap_extent_rmap_remap_piece(tip, &tirec);

			/* Read extent from the source file */
			nimaps = 1;
			error = xfs_bmapi_read(ip, tirec.br_startoff,
					tirec.br_blockcount, &irec,
					&nimaps, 0);
			if (error)
				goto out;
			ASSERT(nimaps == 1);
			ASSERT(tirec.br_startoff == irec.br_startoff);
			trace_xfs_swap_extent_rmap_remap_piece(ip, &irec);

			/* Trim the extent. */
			uirec = tirec;
			uirec.br_blockcount = rlen = min_t(xfs_filblks_t,
					tirec.br_blockcount,
					irec.br_blockcount);
			trace_xfs_swap_extent_rmap_remap_piece(tip, &uirec);

			/* Remove the mapping from the donor file. */
			xfs_bmap_unmap_extent(tp, tip, &uirec);

			/* Remove the mapping from the source file. */
			xfs_bmap_unmap_extent(tp, ip, &irec);

			/* Map the donor file's blocks into the source file. */
			xfs_bmap_map_extent(tp, ip, &uirec);

			/* Map the source file's blocks into the donor file. */
			xfs_bmap_map_extent(tp, tip, &irec);

			error = xfs_defer_finish(tpp);
			tp = *tpp;
			if (error)
				goto out;

			tirec.br_startoff += rlen;
			if (tirec.br_startblock != HOLESTARTBLOCK &&
			    tirec.br_startblock != DELAYSTARTBLOCK)
				tirec.br_startblock += rlen;
			tirec.br_blockcount -= rlen;
		}

		/* Roll on... */
		count_fsb -= ilen;
		offset_fsb += ilen;
	}

	tip->i_d.di_flags2 = tip_flags2;
	return 0;

out:
	trace_xfs_swap_extent_rmap_error(ip, error, _RET_IP_);
	tip->i_d.di_flags2 = tip_flags2;
	return error;
}
/* Swap the extents of two files by swapping data forks. */
STATIC int
xfs_swap_extent_forks(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_inode	*tip,
	int			*src_log_flags,
	int			*target_log_flags)
{
	xfs_filblks_t		aforkblks = 0;
	xfs_filblks_t		taforkblks = 0;
	xfs_extnum_t		junk;
	uint64_t		tmp;
	int			error;

	/*
	 * Count the number of extended attribute blocks
	 */
	if ( ((XFS_IFORK_Q(ip) != 0) && (ip->i_d.di_anextents > 0)) &&
	     (ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
		error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &junk,
				&aforkblks);
		if (error)
			return error;
	}
	if ( ((XFS_IFORK_Q(tip) != 0) && (tip->i_d.di_anextents > 0)) &&
	     (tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
		error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK, &junk,
				&taforkblks);
		if (error)
			return error;
	}

	/*
	 * Btree format (v3) inodes have the inode number stamped in the bmbt
	 * block headers. We can't start changing the bmbt blocks until the
	 * inode owner change is logged so recovery does the right thing in the
	 * event of a crash. Set the owner change log flags now and leave the
	 * bmbt scan as the last step.
	 */
	if (ip->i_d.di_version == 3 &&
	    ip->i_d.di_format == XFS_DINODE_FMT_BTREE)
		(*target_log_flags) |= XFS_ILOG_DOWNER;
	if (tip->i_d.di_version == 3 &&
	    tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
		(*src_log_flags) |= XFS_ILOG_DOWNER;

	/*
	 * Swap the data forks of the inodes
	 */
	swap(ip->i_df, tip->i_df);

	/*
	 * Fix the on-disk inode values
	 */
	tmp = (uint64_t)ip->i_d.di_nblocks;
	ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks;
	tip->i_d.di_nblocks = tmp + taforkblks - aforkblks;

	swap(ip->i_d.di_nextents, tip->i_d.di_nextents);
	swap(ip->i_d.di_format, tip->i_d.di_format);

	/*
	 * The extents in the source inode could still contain speculative
	 * preallocation beyond EOF (e.g. the file is open but not modified
	 * while defrag is in progress). In that case, we need to copy over the
	 * number of delalloc blocks the data fork in the source inode is
	 * tracking beyond EOF so that when the fork is truncated away when the
	 * temporary inode is unlinked we don't underrun the i_delayed_blks
	 * counter on that inode.
	 */
	ASSERT(tip->i_delayed_blks == 0);
	tip->i_delayed_blks = ip->i_delayed_blks;
	ip->i_delayed_blks = 0;

	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_EXTENTS:
		(*src_log_flags) |= XFS_ILOG_DEXT;
		break;
	case XFS_DINODE_FMT_BTREE:
		ASSERT(ip->i_d.di_version < 3 ||
		       (*src_log_flags & XFS_ILOG_DOWNER));
		(*src_log_flags) |= XFS_ILOG_DBROOT;
		break;
	}

	switch (tip->i_d.di_format) {
	case XFS_DINODE_FMT_EXTENTS:
		(*target_log_flags) |= XFS_ILOG_DEXT;
		break;
	case XFS_DINODE_FMT_BTREE:
		(*target_log_flags) |= XFS_ILOG_DBROOT;
		ASSERT(tip->i_d.di_version < 3 ||
		       (*target_log_flags & XFS_ILOG_DOWNER));
		break;
	}

	return 0;
}
/*
 * Fix up the owners of the bmbt blocks to refer to the current inode. The
 * change owner scan attempts to order all modified buffers in the current
 * transaction. In the event of ordered buffer failure, the offending buffer is
 * physically logged as a fallback and the scan returns -EAGAIN. We must roll
 * the transaction in this case to replenish the fallback log reservation and
 * restart the scan. This process repeats until the scan completes.
 */
static int
xfs_swap_change_owner(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip,
	struct xfs_inode	*tmpip)
{
	int			error;
	struct xfs_trans	*tp = *tpp;

	do {
		error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK, ip->i_ino,
					      NULL);
		/* success or fatal error */
		if (error != -EAGAIN)
			break;

		error = xfs_trans_roll(tpp);
		if (error)
			break;
		tp = *tpp;

		/*
		 * Redirty both inodes so they can relog and keep the log tail
		 * moving forward.
		 */
		xfs_trans_ijoin(tp, ip, 0);
		xfs_trans_ijoin(tp, tmpip, 0);
		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		xfs_trans_log_inode(tp, tmpip, XFS_ILOG_CORE);
	} while (true);

	return error;
}
int
xfs_swap_extents(
	struct xfs_inode	*ip,	/* target inode */
	struct xfs_inode	*tip,	/* tmp inode */
	struct xfs_swapext	*sxp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	struct xfs_bstat	*sbp = &sxp->sx_stat;
	int			src_log_flags, target_log_flags;
	int			error = 0;
	int			lock_flags;
	uint64_t		f;
	int			resblks = 0;

	/*
	 * Lock the inodes against other IO, page faults and truncate to
	 * begin with. Then we can ensure the inodes are flushed and have no
	 * page cache safely. Once we have done this we can take the ilocks and
	 * do the rest of the checks.
	 */
	lock_two_nondirectories(VFS_I(ip), VFS_I(tip));
	lock_flags = XFS_MMAPLOCK_EXCL;
	xfs_lock_two_inodes(ip, XFS_MMAPLOCK_EXCL, tip, XFS_MMAPLOCK_EXCL);

	/* Verify that both files have the same format */
	if ((VFS_I(ip)->i_mode & S_IFMT) != (VFS_I(tip)->i_mode & S_IFMT)) {
		error = -EINVAL;
		goto out_unlock;
	}

	/* Verify both files are either real-time or non-realtime */
	if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
		error = -EINVAL;
		goto out_unlock;
	}

	error = xfs_swap_extent_flush(ip);
	if (error)
		goto out_unlock;
	error = xfs_swap_extent_flush(tip);
	if (error)
		goto out_unlock;

	if (xfs_inode_has_cow_data(tip)) {
		error = xfs_reflink_cancel_cow_range(tip, 0, NULLFILEOFF, true);
		if (error)
			goto out_unlock;
	}

	/*
	 * Extent "swapping" with rmap requires a permanent reservation and
	 * a block reservation because it's really just a remap operation
	 * performed with log redo items!
	 */
	if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
		int		w = XFS_DATA_FORK;
		uint32_t	ipnext = XFS_IFORK_NEXTENTS(ip, w);
		uint32_t	tipnext = XFS_IFORK_NEXTENTS(tip, w);

		/*
		 * Conceptually this shouldn't affect the shape of either bmbt,
		 * but since we atomically move extents one by one, we reserve
		 * enough space to rebuild both trees.
		 */
		resblks = XFS_SWAP_RMAP_SPACE_RES(mp, ipnext, w);
		resblks += XFS_SWAP_RMAP_SPACE_RES(mp, tipnext, w);

		/*
		 * Handle the corner case where either inode might straddle the
		 * btree format boundary. If so, the inode could bounce between
		 * btree <-> extent format on unmap -> remap cycles, freeing and
		 * allocating a bmapbt block each time.
		 */
		if (ipnext == (XFS_IFORK_MAXEXT(ip, w) + 1))
			resblks += XFS_IFORK_MAXEXT(ip, w);
		if (tipnext == (XFS_IFORK_MAXEXT(tip, w) + 1))
			resblks += XFS_IFORK_MAXEXT(tip, w);
	}
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
	if (error)
		goto out_unlock;

	/*
	 * Lock and join the inodes to the transaction so that transaction commit
	 * or cancel will unlock the inodes from this point onwards.
	 */
	xfs_lock_two_inodes(ip, XFS_ILOCK_EXCL, tip, XFS_ILOCK_EXCL);
	lock_flags |= XFS_ILOCK_EXCL;
	xfs_trans_ijoin(tp, ip, 0);
	xfs_trans_ijoin(tp, tip, 0);

	/* Verify all data are being swapped */
	if (sxp->sx_offset != 0 ||
	    sxp->sx_length != ip->i_d.di_size ||
	    sxp->sx_length != tip->i_d.di_size) {
		error = -EFAULT;
		goto out_trans_cancel;
	}

	trace_xfs_swap_extent_before(ip, 0);
	trace_xfs_swap_extent_before(tip, 1);

	/* check inode formats now that data is flushed */
	error = xfs_swap_extents_check_format(ip, tip);
	if (error) {
		xfs_notice(mp,
		    "%s: inode 0x%llx format is incompatible for exchanging.",
				__func__, ip->i_ino);
		goto out_trans_cancel;
	}

	/*
	 * Compare the current change & modify times with that
	 * passed in. If they differ, we abort this swap.
	 * This is the mechanism used to ensure the calling
	 * process that the file was not changed out from
	 * under it.
	 */
	if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
	    (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
	    (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
	    (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
		error = -EBUSY;
		goto out_trans_cancel;
	}

	/*
	 * Note the trickiness in setting the log flags - we set the owner log
	 * flag on the opposite inode (i.e. the inode we are setting the new
	 * owner to be) because once we swap the forks and log that, log
	 * recovery is going to see the fork as owned by the swapped inode,
	 * not the pre-swapped inodes.
	 */
	src_log_flags = XFS_ILOG_CORE;
	target_log_flags = XFS_ILOG_CORE;

	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
		error = xfs_swap_extent_rmap(&tp, ip, tip);
	else
		error = xfs_swap_extent_forks(tp, ip, tip, &src_log_flags,
				&target_log_flags);
	if (error)
		goto out_trans_cancel;

	/* Do we have to swap reflink flags? */
	if ((ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK) ^
	    (tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK)) {
		f = ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
		ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
		ip->i_d.di_flags2 |= tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
		tip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
		tip->i_d.di_flags2 |= f & XFS_DIFLAG2_REFLINK;
	}

	/* Swap the cow forks. */
	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
		ASSERT(ip->i_cformat == XFS_DINODE_FMT_EXTENTS);
		ASSERT(tip->i_cformat == XFS_DINODE_FMT_EXTENTS);

		swap(ip->i_cnextents, tip->i_cnextents);
		swap(ip->i_cowfp, tip->i_cowfp);

		if (ip->i_cowfp && ip->i_cowfp->if_bytes)
			xfs_inode_set_cowblocks_tag(ip);
		else
			xfs_inode_clear_cowblocks_tag(ip);
		if (tip->i_cowfp && tip->i_cowfp->if_bytes)
			xfs_inode_set_cowblocks_tag(tip);
		else
			xfs_inode_clear_cowblocks_tag(tip);
	}

	xfs_trans_log_inode(tp, ip, src_log_flags);
	xfs_trans_log_inode(tp, tip, target_log_flags);

	/*
	 * The extent forks have been swapped, but crc=1,rmapbt=0 filesystems
	 * have inode number owner values in the bmbt blocks that still refer to
	 * the old inode. Scan each bmbt to fix up the owner values with the
	 * inode number of the current inode.
	 */
	if (src_log_flags & XFS_ILOG_DOWNER) {
		error = xfs_swap_change_owner(&tp, ip, tip);
		if (error)
			goto out_trans_cancel;
	}
	if (target_log_flags & XFS_ILOG_DOWNER) {
		error = xfs_swap_change_owner(&tp, tip, ip);
		if (error)
			goto out_trans_cancel;
	}

	/*
	 * If this is a synchronous mount, make sure that the
	 * transaction goes to disk before returning to the user.
	 */
	if (mp->m_flags & XFS_MOUNT_WSYNC)
		xfs_trans_set_sync(tp);

	error = xfs_trans_commit(tp);

	trace_xfs_swap_extent_after(ip, 0);
	trace_xfs_swap_extent_after(tip, 1);

out_unlock:
	xfs_iunlock(ip, lock_flags);
	xfs_iunlock(tip, lock_flags);
	unlock_two_nondirectories(VFS_I(ip), VFS_I(tip));
	return error;

out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock;
}