// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_reflink.h"
#include <linux/writeback.h>

/*
 * Structure owned by writepages and passed to each writepage call.
 */
struct xfs_writepage_ctx {
	struct xfs_bmbt_irec	imap;
	int			fork;
	unsigned int		data_seq;
	unsigned int		cow_seq;
	struct xfs_ioend	*ioend;
};

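/*
 * Return the block device that backs this inode: the realtime device for
 * realtime inodes, the data device for everything else.
 */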
struct block_device *
xfs_find_bdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_bdev;
	else
		return mp->m_ddev_targp->bt_bdev;
}

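/*
 * Return the DAX device backing this inode, with the same realtime vs data
 * device selection as above.
 */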
struct dax_device *
xfs_find_daxdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_daxdev;
	else
		return mp->m_ddev_targp->bt_daxdev;
}

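/*
 * Finish writeback on one page: record any error on the page and its mapping,
 * and end page writeback once the last outstanding sub-page block I/O on it
 * has completed.
 */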
static void
xfs_finish_page_writeback(
	struct inode		*inode,
	struct bio_vec		*bvec,
	int			error)
{
	struct iomap_page	*iop = to_iomap_page(bvec->bv_page);

	if (error) {
		SetPageError(bvec->bv_page);
		mapping_set_error(inode->i_mapping, -EIO);
	}

	ASSERT(iop || i_blocksize(inode) == PAGE_SIZE);
	ASSERT(!iop || atomic_read(&iop->write_count) > 0);

	if (!iop || atomic_dec_and_test(&iop->write_count))
		end_page_writeback(bvec->bv_page);
}

/*
 * We're now finished for good with this ioend structure.  Update the page
 * state, release holds on bios, and finally free up memory.  Do not use the
 * ioend after this.
 */
static void
xfs_destroy_ioend(
	struct xfs_ioend	*ioend,
	int			error)
{
	struct inode		*inode = ioend->io_inode;
	struct bio		*bio = &ioend->io_inline_bio;
	struct bio		*last = ioend->io_bio, *next;
	u64			start = bio->bi_iter.bi_sector;
	bool			quiet = bio_flagged(bio, BIO_QUIET);

	for (bio = &ioend->io_inline_bio; bio; bio = next) {
		struct bio_vec	*bvec;
		int		i;
		struct bvec_iter_all iter_all;

		/*
		 * For the last bio, bi_private points to the ioend, so we
		 * need to explicitly end the iteration here.
		 */
		if (bio == last)
			next = NULL;
		else
			next = bio->bi_private;

		/* walk each page on bio, ending page IO on them */
		bio_for_each_segment_all(bvec, bio, i, iter_all)
			xfs_finish_page_writeback(inode, bvec, error);
		bio_put(bio);
	}

	if (unlikely(error && !quiet)) {
		xfs_err_ratelimited(XFS_I(inode)->i_mount,
			"writeback error on sector %llu", start);
	}
}

/*
 * Fast and loose check if this write could update the on-disk inode size.
 */
static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
{
	return ioend->io_offset + ioend->io_size >
		XFS_I(ioend->io_inode)->i_d.di_size;
}

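/*
 * Allocate the transaction for an on-disk file size update ahead of time and
 * attach it to the ioend; the size update itself happens at I/O completion.
 */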
STATIC int
xfs_setfilesize_trans_alloc(
	struct xfs_ioend	*ioend)
{
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0,
				XFS_TRANS_NOFS, &tp);
	if (error)
		return error;

	ioend->io_append_trans = tp;

	/*
	 * We may pass freeze protection with a transaction.  So tell lockdep
	 * we released it.
	 */
	__sb_writers_release(ioend->io_inode->i_sb, SB_FREEZE_FS);
	/*
	 * We hand off the transaction to the completion thread now, so
	 * clear the flag here.
	 */
	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
	return 0;
}

/*
 * Update on-disk file size now that data has been written to disk.
 */
STATIC int
__xfs_setfilesize(
	struct xfs_inode	*ip,
	struct xfs_trans	*tp,
	xfs_off_t		offset,
	size_t			size)
{
	xfs_fsize_t		isize;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	isize = xfs_new_eof(ip, offset + size);
	if (!isize) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_trans_cancel(tp);
		return 0;
	}

	trace_xfs_setfilesize(ip, offset, size);

	ip->i_d.di_size = isize;
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	return xfs_trans_commit(tp);
}

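/*
 * Allocate a size update transaction and update the on-disk file size in one
 * go.
 */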
int
xfs_setfilesize(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	size_t			size)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
	if (error)
		return error;

	return __xfs_setfilesize(ip, tp, offset, size);
}

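/*
 * Complete a deferred size update at I/O completion time, using the
 * transaction reserved at submission, or cancel it if the I/O failed.
 */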
STATIC int
xfs_setfilesize_ioend(
	struct xfs_ioend	*ioend,
	int			error)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_trans	*tp = ioend->io_append_trans;

	/*
	 * The transaction may have been allocated in the I/O submission thread,
	 * thus we need to mark ourselves as being in a transaction manually.
	 * Similarly for freeze protection.
	 */
	current_set_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
	__sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS);

	/* we abort the update if there was an IO error */
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	return __xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
}

/*
 * IO write completion.
 */
STATIC void
xfs_end_io(
	struct work_struct	*work)
{
	struct xfs_ioend	*ioend =
		container_of(work, struct xfs_ioend, io_work);
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	xfs_off_t		offset = ioend->io_offset;
	size_t			size = ioend->io_size;
	int			error;

	/*
	 * Just clean up the in-memory structures if the fs has been shut down.
	 */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		error = -EIO;
		goto done;
	}

	/*
	 * Clean up any COW blocks on an I/O error.
	 */
	error = blk_status_to_errno(ioend->io_bio->bi_status);
	if (unlikely(error)) {
		if (ioend->io_fork == XFS_COW_FORK)
			xfs_reflink_cancel_cow_range(ip, offset, size, true);
		goto done;
	}

	/*
	 * Success: commit the COW or unwritten blocks if needed.
	 */
	if (ioend->io_fork == XFS_COW_FORK)
		error = xfs_reflink_end_cow(ip, offset, size);
	else if (ioend->io_state == XFS_EXT_UNWRITTEN)
		error = xfs_iomap_write_unwritten(ip, offset, size, false);
	else
		ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_append_trans);

done:
	if (ioend->io_append_trans)
		error = xfs_setfilesize_ioend(ioend, error);
	xfs_destroy_ioend(ioend, error);
}

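/*
 * Bio completion handler for buffered writeback.  Completions that need
 * transactions or extent conversions are punted to a workqueue; everything
 * else is torn down immediately.
 */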
STATIC void
xfs_end_bio(
	struct bio		*bio)
{
	struct xfs_ioend	*ioend = bio->bi_private;
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;

	if (ioend->io_fork == XFS_COW_FORK ||
	    ioend->io_state == XFS_EXT_UNWRITTEN)
		queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
	else if (ioend->io_append_trans)
		queue_work(mp->m_data_workqueue, &ioend->io_work);
	else
		xfs_destroy_ioend(ioend, blk_status_to_errno(bio->bi_status));
}

/*
 * Fast revalidation of the cached writeback mapping.  Return true if the
 * current mapping is valid, false otherwise.
 */
static bool
xfs_imap_valid(
	struct xfs_writepage_ctx	*wpc,
	struct xfs_inode		*ip,
	xfs_fileoff_t			offset_fsb)
{
	if (offset_fsb < wpc->imap.br_startoff ||
	    offset_fsb >= wpc->imap.br_startoff + wpc->imap.br_blockcount)
		return false;
	/*
	 * If this is a COW mapping, it is sufficient to check that the mapping
	 * covers the offset.  Be careful to check this first because the
	 * caller can revalidate a COW mapping without updating the data seqno.
	 */
	if (wpc->fork == XFS_COW_FORK)
		return true;

	/*
	 * This is not a COW mapping.  Check the sequence number of the data
	 * fork because concurrent changes could have invalidated the extent.
	 * Check the COW fork because concurrent changes since the last time we
	 * checked (and found nothing at this offset) could have added
	 * overlapping blocks.
	 */
	if (wpc->data_seq != READ_ONCE(ip->i_df.if_seq))
		return false;
	if (xfs_inode_has_cow_data(ip) &&
	    wpc->cow_seq != READ_ONCE(ip->i_cowfp->if_seq))
		return false;
	return true;
}

/*
 * Pass in a delalloc extent and convert it to real extents, returning the real
 * extent that maps offset_fsb in wpc->imap.
 *
 * The current page is held locked so nothing could have removed the block
 * backing offset_fsb, although it could have moved from the COW to the data
 * fork by another thread.
 */
static int
xfs_convert_blocks(
	struct xfs_writepage_ctx *wpc,
	struct xfs_inode	*ip,
	xfs_fileoff_t		offset_fsb)
{
	int			error;

	/*
	 * Attempt to allocate whatever delalloc extent currently backs
	 * offset_fsb and put the result into wpc->imap.  Allocate in a loop
	 * because it may take several attempts to allocate real blocks for a
	 * contiguous delalloc extent if free space is sufficiently fragmented.
	 */
	do {
		error = xfs_bmapi_convert_delalloc(ip, wpc->fork, offset_fsb,
				&wpc->imap, wpc->fork == XFS_COW_FORK ?
					&wpc->cow_seq : &wpc->data_seq);
		if (error)
			return error;
	} while (wpc->imap.br_startoff + wpc->imap.br_blockcount <= offset_fsb);

	return 0;
}

static int
xfs_map_blocks(
	struct xfs_writepage_ctx *wpc,
	struct inode		*inode,
	loff_t			offset)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			count = i_blocksize(inode);
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + count);
	xfs_fileoff_t		cow_fsb = NULLFILEOFF;
	struct xfs_bmbt_irec	imap;
	struct xfs_iext_cursor	icur;
	int			retries = 0;
	int			error = 0;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/*
	 * COW fork blocks can overlap data fork blocks even if the blocks
	 * aren't shared.  COW I/O always takes precedence, so we must always
	 * check for overlap on reflink inodes unless the mapping is already a
	 * COW one, or the COW fork hasn't changed from the last time we looked
	 * at it.
	 *
	 * It's safe to check the COW fork if_seq here without the ILOCK because
	 * we've indirectly protected against concurrent updates: writeback has
	 * the page locked, which prevents concurrent invalidations by reflink
	 * and directio and prevents concurrent buffered writes to the same
	 * page.  Changes to if_seq always happen under i_lock, which protects
	 * against concurrent updates and provides a memory barrier on the way
	 * out that ensures that we always see the current value.
	 */
	if (xfs_imap_valid(wpc, ip, offset_fsb))
		return 0;

	/*
	 * If we don't have a valid map, now it's time to get a new one for this
	 * offset.  This will convert delayed allocations (including COW ones)
	 * into real extents.  If we return without a valid map, it means we
	 * landed in a hole and we skip the block.
	 */
retry:
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       (ip->i_df.if_flags & XFS_IFEXTENTS));

	/*
	 * Check if this offset is covered by a COW extent, and if so use
	 * it directly instead of looking up anything in the data fork.
	 */
	if (xfs_inode_has_cow_data(ip) &&
	    xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &imap))
		cow_fsb = imap.br_startoff;
	if (cow_fsb != NULLFILEOFF && cow_fsb <= offset_fsb) {
		wpc->cow_seq = READ_ONCE(ip->i_cowfp->if_seq);
		xfs_iunlock(ip, XFS_ILOCK_SHARED);

		wpc->fork = XFS_COW_FORK;
		goto allocate_blocks;
	}

	/*
	 * No COW extent overlap.  Revalidate now that we may have updated
	 * ->cow_seq.  If the data mapping is still valid, we're done.
	 */
	if (xfs_imap_valid(wpc, ip, offset_fsb)) {
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
		return 0;
	}

	/*
	 * If we don't have a valid map, now it's time to get a new one for this
	 * offset.  This will convert delayed allocations (including COW ones)
	 * into real extents.
	 */
	if (!xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap))
		imap.br_startoff = end_fsb;	/* fake a hole past EOF */
	wpc->data_seq = READ_ONCE(ip->i_df.if_seq);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	wpc->fork = XFS_DATA_FORK;

	/* landed in a hole or beyond EOF? */
	if (imap.br_startoff > offset_fsb) {
		imap.br_blockcount = imap.br_startoff - offset_fsb;
		imap.br_startoff = offset_fsb;
		imap.br_startblock = HOLESTARTBLOCK;
		imap.br_state = XFS_EXT_NORM;
	}

	/*
	 * Truncate to the next COW extent if there is one.  This is the only
	 * opportunity to do this because we can skip COW fork lookups for the
	 * subsequent blocks in the mapping; however, the requirement to treat
	 * the COW range separately remains.
	 */
	if (cow_fsb != NULLFILEOFF &&
	    cow_fsb < imap.br_startoff + imap.br_blockcount)
		imap.br_blockcount = cow_fsb - imap.br_startoff;

	/* got a delalloc extent? */
	if (imap.br_startblock != HOLESTARTBLOCK &&
	    isnullstartblock(imap.br_startblock))
		goto allocate_blocks;

	wpc->imap = imap;
	trace_xfs_map_blocks_found(ip, offset, count, wpc->fork, &imap);
	return 0;
allocate_blocks:
	error = xfs_convert_blocks(wpc, ip, offset_fsb);
	if (error) {
		/*
		 * If we failed to find the extent in the COW fork we might have
		 * raced with a COW to data fork conversion or truncate.
		 * Restart the lookup to catch the extent in the data fork for
		 * the former case, but prevent additional retries to avoid
		 * looping forever for the latter case.
		 */
		if (error == -EAGAIN && wpc->fork == XFS_COW_FORK && !retries++)
			goto retry;
		ASSERT(error != -EAGAIN);
		return error;
	}

	/*
	 * Due to merging, the real extent returned might be larger than the
	 * original delalloc one.  Trim the returned extent to the next COW
	 * boundary again to force a re-lookup.
	 */
	if (wpc->fork != XFS_COW_FORK && cow_fsb != NULLFILEOFF &&
	    cow_fsb < wpc->imap.br_startoff + wpc->imap.br_blockcount)
		wpc->imap.br_blockcount = cow_fsb - wpc->imap.br_startoff;

	ASSERT(wpc->imap.br_startoff <= offset_fsb);
	ASSERT(wpc->imap.br_startoff + wpc->imap.br_blockcount > offset_fsb);
	trace_xfs_map_blocks_alloc(ip, offset, count, wpc->fork, &imap);
	return 0;
}

/*
 * Submit the bio for an ioend.  We are passed an ioend with a bio attached to
 * it, and we submit that bio.  The ioend may be used for multiple bio
 * submissions, so we only want to allocate an append transaction for the ioend
 * once.  In the case of multiple bio submission, each bio will take an IO
 * reference to the ioend to ensure that the ioend completion is only done once
 * all bios have been submitted and the ioend is really done.
 *
 * If @status is non-zero, it means that we have a situation where some part of
 * the submission process has failed after we have marked pages for writeback
 * and unlocked them.  In this situation, we need to fail the bio and ioend
 * rather than submit it to IO.  This typically only happens on a filesystem
 * shutdown.
 */
STATIC int
xfs_submit_ioend(
	struct writeback_control *wbc,
	struct xfs_ioend	*ioend,
	int			status)
{
	/* Convert CoW extents to regular */
	if (!status && ioend->io_fork == XFS_COW_FORK) {
		/*
		 * Yuk. This can do memory allocation, but is not a
		 * transactional operation so everything is done in GFP_KERNEL
		 * context. That can deadlock, because we hold pages in
		 * writeback state and GFP_KERNEL allocations can block on them.
		 * Hence we must operate in nofs conditions here.
		 */
		unsigned nofs_flag;

		nofs_flag = memalloc_nofs_save();
		status = xfs_reflink_convert_cow(XFS_I(ioend->io_inode),
				ioend->io_offset, ioend->io_size);
		memalloc_nofs_restore(nofs_flag);
	}

	/* Reserve log space if we might write beyond the on-disk inode size. */
	if (!status &&
	    (ioend->io_fork == XFS_COW_FORK ||
	     ioend->io_state != XFS_EXT_UNWRITTEN) &&
	    xfs_ioend_is_append(ioend) &&
	    !ioend->io_append_trans)
		status = xfs_setfilesize_trans_alloc(ioend);

	ioend->io_bio->bi_private = ioend;
	ioend->io_bio->bi_end_io = xfs_end_bio;
	ioend->io_bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);

	/*
	 * If we are failing the IO now, just mark the ioend with an
	 * error and finish it.  This will run IO completion immediately
	 * as there is only one reference to the ioend at this point in
	 * time.
	 */
	if (status) {
		ioend->io_bio->bi_status = errno_to_blk_status(status);
		bio_endio(ioend->io_bio);
		return status;
	}

	ioend->io_bio->bi_write_hint = ioend->io_inode->i_write_hint;
	submit_bio(ioend->io_bio);
	return 0;
}

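/*
 * Allocate an ioend, with the bio it wraps embedded in the structure, and
 * initialise it for the given fork, extent state and starting offset.
 */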
static struct xfs_ioend *
xfs_alloc_ioend(
	struct inode		*inode,
	int			fork,
	xfs_exntst_t		state,
	xfs_off_t		offset,
	struct block_device	*bdev,
	sector_t		sector)
{
	struct xfs_ioend	*ioend;
	struct bio		*bio;

	bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &xfs_ioend_bioset);
	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = sector;

	ioend = container_of(bio, struct xfs_ioend, io_inline_bio);
	INIT_LIST_HEAD(&ioend->io_list);
	ioend->io_fork = fork;
	ioend->io_state = state;
	ioend->io_inode = inode;
	ioend->io_size = 0;
	ioend->io_offset = offset;
	INIT_WORK(&ioend->io_work, xfs_end_io);
	ioend->io_append_trans = NULL;
	ioend->io_bio = bio;
	return ioend;
}

/*
 * Allocate a new bio, and chain the old bio to the new one.
 *
 * Note that we have to perform the chaining in this unintuitive order
 * so that the bi_private linkage is set up in the right direction for the
 * traversal in xfs_destroy_ioend().
 */
static void
xfs_chain_bio(
	struct xfs_ioend	*ioend,
	struct writeback_control *wbc,
	struct block_device	*bdev,
	sector_t		sector)
{
	struct bio *new;

	new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
	bio_set_dev(new, bdev);
	new->bi_iter.bi_sector = sector;
	bio_chain(ioend->io_bio, new);
	bio_get(ioend->io_bio);		/* for xfs_destroy_ioend */
	ioend->io_bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
	ioend->io_bio->bi_write_hint = ioend->io_inode->i_write_hint;
	submit_bio(ioend->io_bio);
	ioend->io_bio = new;
}

/*
 * Test to see if we have an existing ioend structure that we could append to
 * first; otherwise finish off the current ioend and start another.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	xfs_off_t		offset,
	struct page		*page,
	struct iomap_page	*iop,
	struct xfs_writepage_ctx *wpc,
	struct writeback_control *wbc,
	struct list_head	*iolist)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct block_device	*bdev = xfs_find_bdev_for_inode(inode);
	unsigned		len = i_blocksize(inode);
	unsigned		poff = offset & (PAGE_SIZE - 1);
	sector_t		sector;

	sector = xfs_fsb_to_db(ip, wpc->imap.br_startblock) +
		((offset - XFS_FSB_TO_B(mp, wpc->imap.br_startoff)) >> 9);

	if (!wpc->ioend ||
	    wpc->fork != wpc->ioend->io_fork ||
	    wpc->imap.br_state != wpc->ioend->io_state ||
	    sector != bio_end_sector(wpc->ioend->io_bio) ||
	    offset != wpc->ioend->io_offset + wpc->ioend->io_size) {
		if (wpc->ioend)
			list_add(&wpc->ioend->io_list, iolist);
		wpc->ioend = xfs_alloc_ioend(inode, wpc->fork,
				wpc->imap.br_state, offset, bdev, sector);
	}

	if (!__bio_try_merge_page(wpc->ioend->io_bio, page, len, poff, true)) {
		if (iop)
			atomic_inc(&iop->write_count);
		if (bio_full(wpc->ioend->io_bio))
			xfs_chain_bio(wpc->ioend, wbc, bdev, sector);
		bio_add_page(wpc->ioend->io_bio, page, len, poff);
	}

	wpc->ioend->io_size += len;
}

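/*
 * Trace and pass (partial) page invalidation on to the generic iomap code.
 */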
STATIC void
xfs_vm_invalidatepage(
	struct page		*page,
	unsigned int		offset,
	unsigned int		length)
{
	trace_xfs_invalidatepage(page->mapping->host, page, offset, length);
	iomap_invalidatepage(page, offset, length);
}

/*
 * If the page has delalloc blocks on it, we need to punch them out before we
 * invalidate the page.  If we don't, we leave a stale delalloc mapping on the
 * inode that can trip up a later direct I/O read operation on the same region.
 *
 * We prevent this by truncating away the delalloc regions on the page.  Because
 * they are delalloc, we can do this without needing a transaction.  Indeed - if
 * we get ENOSPC errors, we have to be able to do this truncation without a
 * transaction as there is no space left for block reservation (typically why
 * we see an ENOSPC in writeback).
 */
STATIC void
xfs_aops_discard_page(
	struct page		*page)
{
	struct inode		*inode = page->mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	loff_t			offset = page_offset(page);
	xfs_fileoff_t		start_fsb = XFS_B_TO_FSBT(mp, offset);
	int			error;

	if (XFS_FORCED_SHUTDOWN(mp))
		goto out_invalidate;

	xfs_alert(mp,
		"page discard on page "PTR_FMT", inode 0x%llx, offset %llu.",
			page, ip->i_ino, offset);

	error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
			PAGE_SIZE / i_blocksize(inode));
	if (error && !XFS_FORCED_SHUTDOWN(mp))
		xfs_alert(mp, "page discard unable to remove delalloc mapping.");
out_invalidate:
	xfs_vm_invalidatepage(page, 0, PAGE_SIZE);
}

/*
 * We implement an immediate ioend submission policy here to avoid needing to
 * chain multiple ioends and hence nest mempool allocations which can violate
 * the forward progress guarantees we need to provide.  The current ioend we
 * are adding blocks to is cached on the writepage context, and if the new
 * block does not append to the cached ioend it will create a new ioend and
 * cache that instead.
 *
 * If a new ioend is created and cached, the old ioend is returned and queued
 * locally for submission once the entire page is processed or an error has
 * been detected.  While ioends are submitted immediately after they are
 * completed, batching optimisations are provided by higher level block
 * plugging.
 *
 * At the end of a writeback pass, there will be a cached ioend remaining on
 * the writepage context that the caller will need to submit.
 */
static int
xfs_writepage_map(
	struct xfs_writepage_ctx *wpc,
	struct writeback_control *wbc,
	struct inode		*inode,
	struct page		*page,
	uint64_t		end_offset)
{
	LIST_HEAD(submit_list);
	struct iomap_page	*iop = to_iomap_page(page);
	unsigned		len = i_blocksize(inode);
	struct xfs_ioend	*ioend, *next;
	uint64_t		file_offset;	/* file offset of page */
	int			error = 0, count = 0, i;

	ASSERT(iop || i_blocksize(inode) == PAGE_SIZE);
	ASSERT(!iop || atomic_read(&iop->write_count) == 0);

	/*
	 * Walk through the page to find areas to write back.  If we run off
	 * the end of the current map or find the current map invalid, grab a
	 * new one.
	 */
	for (i = 0, file_offset = page_offset(page);
	     i < (PAGE_SIZE >> inode->i_blkbits) && file_offset < end_offset;
	     i++, file_offset += len) {
		if (iop && !test_bit(i, iop->uptodate))
			continue;

		error = xfs_map_blocks(wpc, inode, file_offset);
		if (error)
			break;
		if (wpc->imap.br_startblock == HOLESTARTBLOCK)
			continue;
		xfs_add_to_ioend(inode, file_offset, page, iop, wpc, wbc,
				 &submit_list);
		count++;
	}

	ASSERT(wpc->ioend || list_empty(&submit_list));
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));

	/*
	 * On error, we have to fail the ioend here because we may have set
	 * pages under writeback, we have to make sure we run IO completion to
	 * mark the error state of the IO appropriately, so we can't cancel the
	 * ioend directly here.  That means we have to mark this page as under
	 * writeback if we included any blocks from it in the ioend chain so
	 * that completion treats it correctly.
	 *
	 * If we didn't include the page in the ioend, then on error we can
	 * simply discard and unlock it as there are no other users of the page
	 * now.  The caller will still need to trigger submission of
	 * outstanding ioends on the writepage context so they are treated
	 * correctly on error.
	 */
	if (unlikely(error)) {
		if (!count) {
			xfs_aops_discard_page(page);
			ClearPageUptodate(page);
			unlock_page(page);
			goto done;
		}

		/*
		 * If the page was not fully cleaned, we need to ensure that
		 * the higher layers come back to it correctly.  That means we
		 * need to keep the page dirty, and for WB_SYNC_ALL writeback
		 * we need to ensure the PAGECACHE_TAG_TOWRITE index mark is
		 * not removed so another attempt to write this page in this
		 * writeback sweep will be made.
		 */
		set_page_writeback_keepwrite(page);
	} else {
		clear_page_dirty_for_io(page);
		set_page_writeback(page);
	}

	unlock_page(page);

	/*
	 * Preserve the original error if there was one, otherwise catch
	 * submission errors here and propagate into subsequent ioend
	 * submissions.
	 */
	list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
		int error2;

		list_del_init(&ioend->io_list);
		error2 = xfs_submit_ioend(wbc, ioend, error);
		if (error2 && !error)
			error = error2;
	}

	/*
	 * We can end up here with no error and nothing to write only if we
	 * race with a partial page truncate on a sub-page block sized
	 * filesystem.
	 */
	if (!count)
		end_page_writeback(page);
done:
	mapping_set_error(page->mapping, error);
	return error;
}

/*
 * Write out a dirty page.
 *
 * For delalloc space on the page we need to allocate space and flush it.
 * For unwritten space on the page we need to start the conversion to
 * regular allocated space.
 */
STATIC int
xfs_do_writepage(
	struct page		*page,
	struct writeback_control *wbc,
	void			*data)
{
	struct xfs_writepage_ctx *wpc = data;
	struct inode		*inode = page->mapping->host;
	loff_t			offset;
	uint64_t		end_offset;
	pgoff_t			end_index;

	trace_xfs_writepage(inode, page, 0, 0);

	/*
	 * Refuse to write the page out if we are called from reclaim context.
	 *
	 * This avoids stack overflows when called from deeply used stacks in
	 * random callers for direct reclaim or memcg reclaim.  We explicitly
	 * allow reclaim from kswapd as the stack usage there is relatively low.
	 *
	 * This should never happen except in the case of a VM regression so
	 * warn about it.
	 */
	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
			PF_MEMALLOC))
		goto redirty;

	/*
	 * Given that we do not allow direct reclaim to call us, we should
	 * never be called while in a filesystem transaction.
	 */
	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC_NOFS))
		goto redirty;

	/*
	 * Is this page beyond the end of the file?
	 *
	 * The page index is less than the end_index, adjust the end_offset
	 * to the highest offset that this page should represent.
	 * -----------------------------------------------------
	 * |			file mapping	       | <EOF> |
	 * -----------------------------------------------------
	 * | Page ... | Page N-2 | Page N-1 |  Page N  |       |
	 * ^--------------------------------^----------|--------
	 * |     desired writeback range    |      see else    |
	 * ---------------------------------^------------------|
	 */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_SHIFT;
	if (page->index < end_index)
		end_offset = (xfs_off_t)(page->index + 1) << PAGE_SHIFT;
	else {
		/*
		 * Check whether the page to write out is beyond or straddles
		 * i_size or not.
		 * -------------------------------------------------------
		 * |		file mapping		        | <EOF>  |
		 * -------------------------------------------------------
		 * | Page ... | Page N-2 | Page N-1 |  Page N   | Beyond |
		 * ^--------------------------------^-----------|---------
		 * |				    |      Straddles     |
		 * ---------------------------------^-----------|--------|
		 */
		unsigned offset_into_page = offset & (PAGE_SIZE - 1);

		/*
		 * Skip the page if it is fully outside i_size, e.g. due to a
		 * truncate operation that is in progress.  We must redirty the
		 * page so that reclaim stops reclaiming it.  Otherwise
		 * xfs_vm_releasepage() is called on it and gets confused.
		 *
		 * Note that end_index is an unsigned long.  If the given
		 * offset is greater than 16TB on a 32-bit system and we check
		 * whether the page is fully outside i_size via
		 * "if (page->index >= end_index + 1)", then "end_index + 1"
		 * would evaluate to 0.  This page would then be redirtied and
		 * written out repeatedly, resulting in an infinite loop; the
		 * user program that performs this operation would hang.
		 * Instead, we can detect this situation by checking if the
		 * page to write is totally beyond i_size or if its offset is
		 * just equal to the EOF.
		 */
		if (page->index > end_index ||
		    (page->index == end_index && offset_into_page == 0))
			goto redirty;

		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a file
		 * that is not a multiple of the page size, the remaining
		 * memory is zeroed when mapped, and writes to that region are
		 * not written out to the file."
		 */
		zero_user_segment(page, offset_into_page, PAGE_SIZE);

		/* Adjust the end_offset to the end of file */
		end_offset = offset;
	}

	return xfs_writepage_map(wpc, wbc, inode, page, end_offset);

redirty:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}

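/*
 * ->writepage entry point: write a single page and submit the ioend built up
 * while mapping it.
 */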
STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = { };
	int			ret;

	ret = xfs_do_writepage(page, wbc, &wpc);
	if (wpc.ioend)
		ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
	return ret;
}

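/*
 * ->writepages entry point: write back a range of dirty pages, sharing one
 * writepage context across all pages so contiguous blocks land in the same
 * ioend.
 */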
STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = { };
	int			ret;

	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	ret = write_cache_pages(mapping, wbc, xfs_do_writepage, &wpc);
	if (wpc.ioend)
		ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
	return ret;
}

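/*
 * For DAX, writeback means flushing dirty mappings through the dax device
 * rather than issuing bios; defer to the generic DAX code for that.
 */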
STATIC int
xfs_dax_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	return dax_writeback_mapping_range(mapping,
			xfs_find_bdev_for_inode(mapping->host), wbc);
}

STATIC int
xfs_vm_releasepage(
	struct page		*page,
	gfp_t			gfp_mask)
{
	trace_xfs_releasepage(page->mapping->host, page, 0, 0);
	return iomap_releasepage(page, gfp_mask);
}

STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct xfs_inode	*ip = XFS_I(mapping->host);

	trace_xfs_vm_bmap(ip);

	/*
	 * The swap code (ab-)uses ->bmap to get a block mapping and then
	 * bypasses the file system for actual I/O.  We really can't allow
	 * that on reflink inodes, so we have to skip out here.  And yes,
	 * 0 is the magic code for a bmap error.
	 *
	 * Since we don't pass back blockdev info, we can't return bmap
	 * information for rt files either.
	 */
	if (xfs_is_cow_inode(ip) || XFS_IS_REALTIME_INODE(ip))
		return 0;
	return iomap_bmap(mapping, block, &xfs_iomap_ops);
}

STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	trace_xfs_vm_readpage(page->mapping->host, 1);
	return iomap_readpage(page, &xfs_iomap_ops);
}

STATIC int
xfs_vm_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	trace_xfs_vm_readpages(mapping->host, nr_pages);
	return iomap_readpages(mapping, pages, nr_pages, &xfs_iomap_ops);
}

static int
xfs_iomap_swapfile_activate(
	struct swap_info_struct		*sis,
	struct file			*swap_file,
	sector_t			*span)
{
	sis->bdev = xfs_find_bdev_for_inode(file_inode(swap_file));
	return iomap_swapfile_activate(sis, swap_file, span, &xfs_iomap_ops);
}

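/* Address space operations for regular, non-DAX XFS files. */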
const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.set_page_dirty		= iomap_set_page_dirty,
	.releasepage		= xfs_vm_releasepage,
	.invalidatepage		= xfs_vm_invalidatepage,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= noop_direct_IO,
	.migratepage		= iomap_migrate_page,
	.is_partially_uptodate	= iomap_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
	.swap_activate		= xfs_iomap_swapfile_activate,
};

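/*
 * Address space operations for DAX files.  Most operations are no-ops or are
 * handled directly by the DAX code; there is no buffered I/O path.
 */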
const struct address_space_operations xfs_dax_aops = {
	.writepages		= xfs_dax_writepages,
	.direct_IO		= noop_direct_IO,
	.set_page_dirty		= noop_set_page_dirty,
	.invalidatepage		= noop_invalidatepage,
	.swap_activate		= xfs_iomap_swapfile_activate,
};