// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_reflink.h"

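/*
 * Writeback context: embeds the generic iomap writeback context and caches
 * the data and COW fork sequence numbers so that a previously cached
 * mapping can be revalidated cheaply on the next ->map_blocks call.
 */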
struct xfs_writepage_ctx {
	struct iomap_writepage_ctx ctx;
	unsigned int		data_seq;
	unsigned int		cow_seq;
};

static inline struct xfs_writepage_ctx *
XFS_WPC(struct iomap_writepage_ctx *ctx)
{
	return container_of(ctx, struct xfs_writepage_ctx, ctx);
}

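/*
 * Return the block device backing an inode: the realtime device for
 * realtime files, otherwise the data device.
 */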
struct block_device *
xfs_find_bdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_bdev;
	return mp->m_ddev_targp->bt_bdev;
}

struct dax_device *
xfs_find_daxdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_daxdev;
	return mp->m_ddev_targp->bt_daxdev;
}

/*
 * Fast and loose check if this write could update the on-disk inode size.
 */
static inline bool xfs_ioend_is_append(struct iomap_ioend *ioend)
{
	return ioend->io_offset + ioend->io_size >
		XFS_I(ioend->io_inode)->i_d.di_size;
}

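/*
 * Allocate a transaction at I/O submission time for the on-disk file size
 * update that may be needed once this ioend completes.  The completion
 * thread inherits the transaction along with its freeze protection and
 * NOFS state, hence the lockdep and task-flag handoff below.
 */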
STATIC int
xfs_setfilesize_trans_alloc(
	struct iomap_ioend	*ioend)
{
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
	if (error)
		return error;

	ioend->io_private = tp;
	/*
	 * We may pass freeze protection with a transaction.  So tell lockdep
	 * we released it.
	 */
	__sb_writers_release(ioend->io_inode->i_sb, SB_FREEZE_FS);
	/*
	 * We hand off the transaction to the completion thread now, so
	 * clear the flag here.
	 */
	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
	return 0;
}

/*
 * Update on-disk file size now that data has been written to disk.
 */
STATIC int
__xfs_setfilesize(
	struct xfs_inode	*ip,
	struct xfs_trans	*tp,
	xfs_off_t		offset,
	size_t			size)
{
	xfs_fsize_t		isize;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	isize = xfs_new_eof(ip, offset + size);
	if (!isize) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_trans_cancel(tp);
		return 0;
	}

	trace_xfs_setfilesize(ip, offset, size);

	ip->i_d.di_size = isize;
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	return xfs_trans_commit(tp);
}

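/*
 * Wrapper around __xfs_setfilesize() that allocates the file size update
 * transaction itself.
 */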
int
xfs_setfilesize(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	size_t			size)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
	if (error)
		return error;

	return __xfs_setfilesize(ip, tp, offset, size);
}

STATIC int
xfs_setfilesize_ioend(
	struct iomap_ioend	*ioend,
	int			error)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_trans	*tp = ioend->io_private;

	/*
	 * The transaction may have been allocated in the I/O submission
	 * thread, thus we need to mark ourselves as being in a transaction
	 * manually.  Similarly for freeze protection.
	 */
	current_set_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
	__sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS);

	/* we abort the update if there was an IO error */
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	return __xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
}

/*
 * IO write completion.
 */
STATIC void
xfs_end_ioend(
	struct iomap_ioend	*ioend)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	xfs_off_t		offset = ioend->io_offset;
	size_t			size = ioend->io_size;
	unsigned int		nofs_flag;
	int			error;

	/*
	 * We can allocate memory here while doing writeback on behalf of
	 * memory reclaim.  To avoid memory allocation deadlocks set the
	 * task-wide nofs context for the following operations.
	 */
	nofs_flag = memalloc_nofs_save();

	/*
	 * Just clean up the in-memory structures if the fs has been shut down.
	 */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		error = -EIO;
		goto done;
	}

	/*
	 * Clean up any COW blocks on an I/O error.
	 */
	error = blk_status_to_errno(ioend->io_bio->bi_status);
	if (unlikely(error)) {
		if (ioend->io_flags & IOMAP_F_SHARED)
			xfs_reflink_cancel_cow_range(ip, offset, size, true);
		goto done;
	}

	/*
	 * Success: commit the COW or unwritten blocks if needed.
	 */
	if (ioend->io_flags & IOMAP_F_SHARED)
		error = xfs_reflink_end_cow(ip, offset, size);
	else if (ioend->io_type == IOMAP_UNWRITTEN)
		error = xfs_iomap_write_unwritten(ip, offset, size, false);
	else
		ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_private);

done:
	if (ioend->io_private)
		error = xfs_setfilesize_ioend(ioend, error);
	iomap_finish_ioends(ioend, error);
	memalloc_nofs_restore(nofs_flag);
}

/*
 * If the ioend being merged has a preallocated transaction for file size
 * updates, we need to ensure the ioend it is merged into also has one.  If
 * it already has one we can simply cancel the transaction as it is
 * guaranteed to be clean.
 */
static void
xfs_ioend_merge_private(
	struct iomap_ioend	*ioend,
	struct iomap_ioend	*next)
{
	if (!ioend->io_private) {
		ioend->io_private = next->io_private;
		next->io_private = NULL;
	} else {
		xfs_setfilesize_ioend(next, -ECANCELED);
	}
}

/* Finish all pending io completions. */
void
xfs_end_io(
	struct work_struct	*work)
{
	struct xfs_inode	*ip =
		container_of(work, struct xfs_inode, i_ioend_work);
	struct iomap_ioend	*ioend;
	struct list_head	tmp;
	unsigned long		flags;

	spin_lock_irqsave(&ip->i_ioend_lock, flags);
	list_replace_init(&ip->i_ioend_list, &tmp);
	spin_unlock_irqrestore(&ip->i_ioend_lock, flags);

	iomap_sort_ioends(&tmp);
	while ((ioend = list_first_entry_or_null(&tmp, struct iomap_ioend,
			io_list))) {
		list_del_init(&ioend->io_list);
		iomap_ioend_try_merge(ioend, &tmp, xfs_ioend_merge_private);
		xfs_end_ioend(ioend);
	}
}

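/*
 * Ioends that update the file size, convert unwritten extents, or finish a
 * COW operation need transaction and locking context, so they must be
 * completed from the workqueue rather than from bio completion context.
 */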
static inline bool xfs_ioend_needs_workqueue(struct iomap_ioend *ioend)
{
	return ioend->io_private ||
		ioend->io_type == IOMAP_UNWRITTEN ||
		(ioend->io_flags & IOMAP_F_SHARED);
}

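/*
 * Bio completion handler: queue the ioend on the per-inode completion list
 * and kick the completion work item if the list was previously empty.
 */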
STATIC void
xfs_end_bio(
	struct bio		*bio)
{
	struct iomap_ioend	*ioend = bio->bi_private;
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	unsigned long		flags;

	ASSERT(xfs_ioend_needs_workqueue(ioend));

	spin_lock_irqsave(&ip->i_ioend_lock, flags);
	if (list_empty(&ip->i_ioend_list))
		WARN_ON_ONCE(!queue_work(ip->i_mount->m_unwritten_workqueue,
					 &ip->i_ioend_work));
	list_add_tail(&ioend->io_list, &ip->i_ioend_list);
	spin_unlock_irqrestore(&ip->i_ioend_lock, flags);
}

/*
 * Fast revalidation of the cached writeback mapping.  Return true if the
 * current mapping is valid, false otherwise.
 */
static bool
xfs_imap_valid(
	struct iomap_writepage_ctx	*wpc,
	struct xfs_inode		*ip,
	loff_t				offset)
{
	if (offset < wpc->iomap.offset ||
	    offset >= wpc->iomap.offset + wpc->iomap.length)
		return false;
	/*
	 * If this is a COW mapping, it is sufficient to check that the mapping
	 * covers the offset.  Be careful to check this first because the
	 * caller can revalidate a COW mapping without updating the data seqno.
	 */
	if (wpc->iomap.flags & IOMAP_F_SHARED)
		return true;

	/*
	 * This is not a COW mapping.  Check the sequence number of the data
	 * fork because concurrent changes could have invalidated the extent.
	 * Check the COW fork because concurrent changes since the last time
	 * we checked (and found nothing at this offset) could have added
	 * overlapping blocks.
	 */
	if (XFS_WPC(wpc)->data_seq != READ_ONCE(ip->i_df.if_seq))
		return false;
	if (xfs_inode_has_cow_data(ip) &&
	    XFS_WPC(wpc)->cow_seq != READ_ONCE(ip->i_cowfp->if_seq))
		return false;
	return true;
}

/*
 * Pass in a delalloc extent and convert it to real extents, and return the
 * real extent that maps offset_fsb in wpc->iomap.
 *
 * The current page is held locked so nothing could have removed the block
 * backing offset_fsb, although it could have moved from the COW to the data
 * fork by another thread.
 */
static int
xfs_convert_blocks(
	struct iomap_writepage_ctx *wpc,
	struct xfs_inode	*ip,
	int			whichfork,
	loff_t			offset)
{
	unsigned		*seq;
	int			error;

	if (whichfork == XFS_COW_FORK)
		seq = &XFS_WPC(wpc)->cow_seq;
	else
		seq = &XFS_WPC(wpc)->data_seq;

	/*
	 * Attempt to allocate whatever delalloc extent currently backs offset
	 * and put the result into wpc->iomap.  Allocate in a loop because it
	 * may take several attempts to allocate real blocks for a contiguous
	 * delalloc extent if free space is sufficiently fragmented.
	 */
	do {
		error = xfs_bmapi_convert_delalloc(ip, whichfork, offset,
				&wpc->iomap, seq);
		if (error)
			return error;
	} while (wpc->iomap.offset + wpc->iomap.length <= offset);

	return 0;
}

static int
xfs_map_blocks(
	struct iomap_writepage_ctx *wpc,
	struct inode		*inode,
	loff_t			offset)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			count = i_blocksize(inode);
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + count);
	xfs_fileoff_t		cow_fsb = NULLFILEOFF;
	int			whichfork = XFS_DATA_FORK;
	struct xfs_bmbt_irec	imap;
	struct xfs_iext_cursor	icur;
	int			retries = 0;
	int			error = 0;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/*
	 * COW fork blocks can overlap data fork blocks even if the blocks
	 * aren't shared.  COW I/O always takes precedence, so we must always
	 * check for overlap on reflink inodes unless the mapping is already a
	 * COW one, or the COW fork hasn't changed from the last time we looked
	 * at it.
	 *
	 * It's safe to check the COW fork if_seq here without the ILOCK
	 * because we've indirectly protected against concurrent updates:
	 * writeback has the page locked, which prevents concurrent
	 * invalidations by reflink and directio and prevents concurrent
	 * buffered writes to the same page.  Changes to if_seq always happen
	 * under i_lock, which protects against concurrent updates and
	 * provides a memory barrier on the way out that ensures that we
	 * always see the current value.
	 */
	if (xfs_imap_valid(wpc, ip, offset))
		return 0;

	/*
	 * If we don't have a valid map, now it's time to get a new one for
	 * this offset.  This will convert delayed allocations (including COW
	 * ones) into real extents.  If we return without a valid map, it
	 * means we landed in a hole and we skip the block.
	 */
retry:
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       (ip->i_df.if_flags & XFS_IFEXTENTS));

	/*
	 * Check if this offset is covered by a COW extent, and if so use
	 * it directly instead of looking up anything in the data fork.
	 */
	if (xfs_inode_has_cow_data(ip) &&
	    xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &imap))
		cow_fsb = imap.br_startoff;
	if (cow_fsb != NULLFILEOFF && cow_fsb <= offset_fsb) {
		XFS_WPC(wpc)->cow_seq = READ_ONCE(ip->i_cowfp->if_seq);
		xfs_iunlock(ip, XFS_ILOCK_SHARED);

		whichfork = XFS_COW_FORK;
		goto allocate_blocks;
	}

	/*
	 * No COW extent overlap.  Revalidate now that we may have updated
	 * ->cow_seq.  If the data mapping is still valid, we're done.
	 */
	if (xfs_imap_valid(wpc, ip, offset)) {
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
		return 0;
	}

	/*
	 * If we don't have a valid map, now it's time to get a new one for
	 * this offset.  This will convert delayed allocations (including COW
	 * ones) into real extents.
	 */
	if (!xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap))
		imap.br_startoff = end_fsb;	/* fake a hole past EOF */
	XFS_WPC(wpc)->data_seq = READ_ONCE(ip->i_df.if_seq);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	/* landed in a hole or beyond EOF? */
	if (imap.br_startoff > offset_fsb) {
		imap.br_blockcount = imap.br_startoff - offset_fsb;
		imap.br_startoff = offset_fsb;
		imap.br_startblock = HOLESTARTBLOCK;
		imap.br_state = XFS_EXT_NORM;
	}

	/*
	 * Truncate to the next COW extent if there is one.  This is the only
	 * opportunity to do this because we can skip COW fork lookups for the
	 * subsequent blocks in the mapping; however, the requirement to treat
	 * the COW range separately remains.
	 */
	if (cow_fsb != NULLFILEOFF &&
	    cow_fsb < imap.br_startoff + imap.br_blockcount)
		imap.br_blockcount = cow_fsb - imap.br_startoff;

	/* got a delalloc extent? */
	if (imap.br_startblock != HOLESTARTBLOCK &&
	    isnullstartblock(imap.br_startblock))
		goto allocate_blocks;

	xfs_bmbt_to_iomap(ip, &wpc->iomap, &imap, 0);
	trace_xfs_map_blocks_found(ip, offset, count, whichfork, &imap);
	return 0;
allocate_blocks:
	error = xfs_convert_blocks(wpc, ip, whichfork, offset);
	if (error) {
		/*
		 * If we failed to find the extent in the COW fork we might
		 * have raced with a COW to data fork conversion or truncate.
		 * Restart the lookup to catch the extent in the data fork for
		 * the former case, but prevent additional retries to avoid
		 * looping forever for the latter case.
		 */
		if (error == -EAGAIN && whichfork == XFS_COW_FORK && !retries++)
			goto retry;
		ASSERT(error != -EAGAIN);
		return error;
	}

	/*
	 * Due to merging the returned real extent might be larger than the
	 * original delalloc one.  Trim the returned extent to the next COW
	 * boundary again to force a re-lookup.
	 */
	if (whichfork != XFS_COW_FORK && cow_fsb != NULLFILEOFF) {
		loff_t		cow_offset = XFS_FSB_TO_B(mp, cow_fsb);

		if (cow_offset < wpc->iomap.offset + wpc->iomap.length)
			wpc->iomap.length = cow_offset - wpc->iomap.offset;
	}

	ASSERT(wpc->iomap.offset <= offset);
	ASSERT(wpc->iomap.offset + wpc->iomap.length > offset);
	trace_xfs_map_blocks_alloc(ip, offset, count, whichfork, &imap);
	return 0;
}

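/*
 * Prepare an ioend for submission: convert any CoW extents up front, reserve
 * log space for a possible file size update, and direct completions that
 * need transactional work to xfs_end_bio() and thus to the workqueue.
 */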
static int
xfs_prepare_ioend(
	struct iomap_ioend	*ioend,
	int			status)
{
	unsigned int		nofs_flag;

	/*
	 * We can allocate memory here while doing writeback on behalf of
	 * memory reclaim.  To avoid memory allocation deadlocks set the
	 * task-wide nofs context for the following operations.
	 */
	nofs_flag = memalloc_nofs_save();

	/* Convert CoW extents to regular */
	if (!status && (ioend->io_flags & IOMAP_F_SHARED)) {
		status = xfs_reflink_convert_cow(XFS_I(ioend->io_inode),
				ioend->io_offset, ioend->io_size);
	}

	/* Reserve log space if we might write beyond the on-disk inode size. */
	if (!status &&
	    ((ioend->io_flags & IOMAP_F_SHARED) ||
	     ioend->io_type != IOMAP_UNWRITTEN) &&
	    xfs_ioend_is_append(ioend) &&
	    !ioend->io_private)
		status = xfs_setfilesize_trans_alloc(ioend);

	memalloc_nofs_restore(nofs_flag);

	if (xfs_ioend_needs_workqueue(ioend))
		ioend->io_bio->bi_end_io = xfs_end_bio;
	return status;
}

/*
 * If the page has delalloc blocks on it, we need to punch them out before we
 * invalidate the page.  If we don't, we leave a stale delalloc mapping on the
 * inode that can trip up a later direct I/O read operation on the same region.
 *
 * We prevent this by truncating away the delalloc regions on the page.
 * Because they are delalloc, we can do this without needing a transaction.
 * Indeed - if we get ENOSPC errors, we have to be able to do this truncation
 * without a transaction as there is no space left for block reservation
 * (typically why we see an ENOSPC in writeback).
 */
static void
xfs_discard_page(
	struct page		*page)
{
	struct inode		*inode = page->mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	loff_t			offset = page_offset(page);
	xfs_fileoff_t		start_fsb = XFS_B_TO_FSBT(mp, offset);
	int			error;

	if (XFS_FORCED_SHUTDOWN(mp))
		goto out_invalidate;

	xfs_alert(mp,
		"page discard on page "PTR_FMT", inode 0x%llx, offset %llu.",
			page, ip->i_ino, offset);

	error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
			PAGE_SIZE / i_blocksize(inode));
	if (error && !XFS_FORCED_SHUTDOWN(mp))
		xfs_alert(mp, "page discard unable to remove delalloc mapping.");
out_invalidate:
	iomap_invalidatepage(page, 0, PAGE_SIZE);
}

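/* Callbacks that the generic iomap writeback code makes into XFS. */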
static const struct iomap_writeback_ops xfs_writeback_ops = {
	.map_blocks		= xfs_map_blocks,
	.prepare_ioend		= xfs_prepare_ioend,
	.discard_page		= xfs_discard_page,
};

STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = { };

	return iomap_writepage(page, wbc, &wpc.ctx, &xfs_writeback_ops);
}

STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = { };

	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	return iomap_writepages(mapping, wbc, &wpc.ctx, &xfs_writeback_ops);
}

STATIC int
xfs_dax_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	return dax_writeback_mapping_range(mapping,
			xfs_find_bdev_for_inode(mapping->host), wbc);
}

STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct xfs_inode	*ip = XFS_I(mapping->host);

	trace_xfs_vm_bmap(ip);

	/*
	 * The swap code (ab-)uses ->bmap to get a block mapping and then
	 * bypasses the file system for actual I/O.  We really can't allow
	 * that on reflink inodes, so we have to skip out here.  And yes,
	 * 0 is the magic code for a bmap error.
	 *
	 * Since we don't pass back blockdev info, we can't return bmap
	 * information for rt files either.
	 */
	if (xfs_is_cow_inode(ip) || XFS_IS_REALTIME_INODE(ip))
		return 0;
	return iomap_bmap(mapping, block, &xfs_read_iomap_ops);
}

STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	return iomap_readpage(page, &xfs_read_iomap_ops);
}

STATIC int
xfs_vm_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	return iomap_readpages(mapping, pages, nr_pages, &xfs_read_iomap_ops);
}

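/*
 * Activate a swap file.  The swap code does I/O directly to the block
 * device, so it must be set up front; the extent walk itself is handled
 * by iomap_swapfile_activate().
 */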
static int
xfs_iomap_swapfile_activate(
	struct swap_info_struct		*sis,
	struct file			*swap_file,
	sector_t			*span)
{
	sis->bdev = xfs_find_bdev_for_inode(file_inode(swap_file));
	return iomap_swapfile_activate(sis, swap_file, span,
			&xfs_read_iomap_ops);
}

const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.set_page_dirty		= iomap_set_page_dirty,
	.releasepage		= iomap_releasepage,
	.invalidatepage		= iomap_invalidatepage,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= noop_direct_IO,
	.migratepage		= iomap_migrate_page,
	.is_partially_uptodate	= iomap_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
	.swap_activate		= xfs_iomap_swapfile_activate,
};

const struct address_space_operations xfs_dax_aops = {
	.writepages		= xfs_dax_writepages,
	.direct_IO		= noop_direct_IO,
	.set_page_dirty		= noop_set_page_dirty,
	.invalidatepage		= noop_invalidatepage,
	.swap_activate		= xfs_iomap_swapfile_activate,
};