// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (C) 2016-2019 Christoph Hellwig.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/writeback.h>
#include <linux/list_sort.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/sched/signal.h>
#include <linux/migrate.h>
#include "trace.h"

#include "../internal.h"
/*
 * Structure allocated for each page when block size < PAGE_SIZE to track
 * sub-page uptodate status and I/O completions.
 */
struct iomap_page {
	atomic_t		read_count;
	atomic_t		write_count;
	DECLARE_BITMAP(uptodate, PAGE_SIZE / SECTOR_SIZE);
};
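
/*
 * Illustrative note (not part of the original file): the bitmap is sized
 * for the smallest supported block size of one sector, so it can track any
 * block size up to PAGE_SIZE.  For example, with a 4096-byte page and
 * 1024-byte blocks there are four blocks per page, and a read that fills
 * bytes 0-2047 sets bits 0 and 1 only.
 */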

static inline struct iomap_page *to_iomap_page(struct page *page)
{
	if (page_has_private(page))
		return (struct iomap_page *)page_private(page);
	return NULL;
}

static struct bio_set iomap_ioend_bioset;

static struct iomap_page *
iomap_page_create(struct inode *inode, struct page *page)
{
	struct iomap_page *iop = to_iomap_page(page);

	if (iop || i_blocksize(inode) == PAGE_SIZE)
		return iop;

	iop = kmalloc(sizeof(*iop), GFP_NOFS | __GFP_NOFAIL);
	atomic_set(&iop->read_count, 0);
	atomic_set(&iop->write_count, 0);
	bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE);

	/*
	 * migrate_page_move_mapping() assumes that pages with private data
	 * have their count elevated by 1.
	 */
	get_page(page);
	set_page_private(page, (unsigned long)iop);
	SetPagePrivate(page);
	return iop;
}

static void
iomap_page_release(struct page *page)
{
	struct iomap_page *iop = to_iomap_page(page);

	if (!iop)
		return;
	WARN_ON_ONCE(atomic_read(&iop->read_count));
	WARN_ON_ONCE(atomic_read(&iop->write_count));
	ClearPagePrivate(page);
	set_page_private(page, 0);
	put_page(page);
	kfree(iop);
}

/*
 * Calculate the range inside the page that we actually need to read.
 */
static void
iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
		loff_t *pos, loff_t length, unsigned *offp, unsigned *lenp)
{
	loff_t orig_pos = *pos;
	loff_t isize = i_size_read(inode);
	unsigned block_bits = inode->i_blkbits;
	unsigned block_size = (1 << block_bits);
	unsigned poff = offset_in_page(*pos);
	unsigned plen = min_t(loff_t, PAGE_SIZE - poff, length);
	unsigned first = poff >> block_bits;
	unsigned last = (poff + plen - 1) >> block_bits;

	/*
	 * If the block size is smaller than the page size we need to check the
	 * per-block uptodate status and adjust the offset and length if needed
	 * to avoid reading in already uptodate ranges.
	 */
	if (iop) {
		unsigned int i;

		/* move forward for each leading block marked uptodate */
		for (i = first; i <= last; i++) {
			if (!test_bit(i, iop->uptodate))
				break;
			*pos += block_size;
			poff += block_size;
			plen -= block_size;
			first++;
		}

		/* truncate len if we find any trailing uptodate block(s) */
		for ( ; i <= last; i++) {
			if (test_bit(i, iop->uptodate)) {
				plen -= (last - i + 1) * block_size;
				last = i - 1;
				break;
			}
		}
	}

	/*
	 * If the extent spans the block that contains the i_size we need to
	 * handle both halves separately so that we properly zero data in the
	 * page cache for blocks that are entirely outside of i_size.
	 */
	if (orig_pos <= isize && orig_pos + length > isize) {
		unsigned end = offset_in_page(isize - 1) >> block_bits;

		if (first <= end && last > end)
			plen -= (last - end) * block_size;
	}

	*offp = poff;
	*lenp = plen;
}
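
/*
 * Worked example (illustrative, not from the original file): with a
 * 4096-byte page, 1024-byte blocks, and blocks 0-1 already uptodate, a
 * request covering the whole page is trimmed to poff = 2048, plen = 2048,
 * so only the two trailing non-uptodate blocks are actually read.
 */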

static void
iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len)
{
	struct iomap_page *iop = to_iomap_page(page);
	struct inode *inode = page->mapping->host;
	unsigned first = off >> inode->i_blkbits;
	unsigned last = (off + len - 1) >> inode->i_blkbits;
	bool uptodate = true;
	unsigned int i;

	if (iop) {
		for (i = 0; i < PAGE_SIZE / i_blocksize(inode); i++) {
			if (i >= first && i <= last)
				set_bit(i, iop->uptodate);
			else if (!test_bit(i, iop->uptodate))
				uptodate = false;
		}
	}

	if (uptodate && !PageError(page))
		SetPageUptodate(page);
}

static void
iomap_read_finish(struct iomap_page *iop, struct page *page)
{
	if (!iop || atomic_dec_and_test(&iop->read_count))
		unlock_page(page);
}

static void
iomap_read_page_end_io(struct bio_vec *bvec, int error)
{
	struct page *page = bvec->bv_page;
	struct iomap_page *iop = to_iomap_page(page);

	if (unlikely(error)) {
		ClearPageUptodate(page);
		SetPageError(page);
	} else {
		iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len);
	}

	iomap_read_finish(iop, page);
}

static void
iomap_read_end_io(struct bio *bio)
{
	int error = blk_status_to_errno(bio->bi_status);
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all)
		iomap_read_page_end_io(bvec, error);
	bio_put(bio);
}

struct iomap_readpage_ctx {
	struct page		*cur_page;
	bool			cur_page_in_bio;
	bool			is_readahead;
	struct bio		*bio;
	struct list_head	*pages;
};

static void
iomap_read_inline_data(struct inode *inode, struct page *page,
		struct iomap *iomap)
{
	size_t size = i_size_read(inode);
	void *addr;

	if (PageUptodate(page))
		return;

	BUG_ON(page->index);
	BUG_ON(size > PAGE_SIZE - offset_in_page(iomap->inline_data));

	addr = kmap_atomic(page);
	memcpy(addr, iomap->inline_data, size);
	memset(addr + size, 0, PAGE_SIZE - size);
	kunmap_atomic(addr);
	SetPageUptodate(page);
}

static inline bool iomap_block_needs_zeroing(struct inode *inode,
		struct iomap *iomap, loff_t pos)
{
	return iomap->type != IOMAP_MAPPED ||
		(iomap->flags & IOMAP_F_NEW) ||
		pos >= i_size_read(inode);
}

static loff_t
iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct iomap_readpage_ctx *ctx = data;
	struct page *page = ctx->cur_page;
	struct iomap_page *iop = iomap_page_create(inode, page);
	bool same_page = false, is_contig = false;
	loff_t orig_pos = pos;
	unsigned poff, plen;
	sector_t sector;

	if (iomap->type == IOMAP_INLINE) {
		WARN_ON_ONCE(pos);
		iomap_read_inline_data(inode, page, iomap);
		return PAGE_SIZE;
	}

	/* zero post-eof blocks as the page may be mapped */
	iomap_adjust_read_range(inode, iop, &pos, length, &poff, &plen);
	if (plen == 0)
		goto done;

	if (iomap_block_needs_zeroing(inode, iomap, pos)) {
		zero_user(page, poff, plen);
		iomap_set_range_uptodate(page, poff, plen);
		goto done;
	}

	ctx->cur_page_in_bio = true;

	/*
	 * Try to merge into a previous segment if we can.
	 */
	sector = iomap_sector(iomap, pos);
	if (ctx->bio && bio_end_sector(ctx->bio) == sector)
		is_contig = true;

	if (is_contig &&
	    __bio_try_merge_page(ctx->bio, page, plen, poff, &same_page)) {
		if (!same_page && iop)
			atomic_inc(&iop->read_count);
		goto done;
	}

	/*
	 * If we start a new segment we need to increase the read count, and we
	 * need to do so before submitting any previous full bio to make sure
	 * that we don't prematurely unlock the page.
	 */
	if (iop)
		atomic_inc(&iop->read_count);

	if (!ctx->bio || !is_contig || bio_full(ctx->bio, plen)) {
		gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
		int nr_vecs = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;

		if (ctx->bio)
			submit_bio(ctx->bio);

		if (ctx->is_readahead) /* same as readahead_gfp_mask */
			gfp |= __GFP_NORETRY | __GFP_NOWARN;
		ctx->bio = bio_alloc(gfp, min(BIO_MAX_PAGES, nr_vecs));
		ctx->bio->bi_opf = REQ_OP_READ;
		if (ctx->is_readahead)
			ctx->bio->bi_opf |= REQ_RAHEAD;
		ctx->bio->bi_iter.bi_sector = sector;
		bio_set_dev(ctx->bio, iomap->bdev);
		ctx->bio->bi_end_io = iomap_read_end_io;
	}

	bio_add_page(ctx->bio, page, plen, poff);
done:
	/*
	 * Move the caller beyond our range so that it keeps making progress.
	 * For that we have to include any leading non-uptodate ranges, but
	 * we can skip trailing ones as they will be handled in the next
	 * iteration.
	 */
	return pos - orig_pos + plen;
}

int
iomap_readpage(struct page *page, const struct iomap_ops *ops)
{
	struct iomap_readpage_ctx ctx = { .cur_page = page };
	struct inode *inode = page->mapping->host;
	unsigned poff;
	loff_t ret;

	trace_iomap_readpage(page->mapping->host, 1);

	for (poff = 0; poff < PAGE_SIZE; poff += ret) {
		ret = iomap_apply(inode, page_offset(page) + poff,
				PAGE_SIZE - poff, 0, ops, &ctx,
				iomap_readpage_actor);
		if (ret <= 0) {
			WARN_ON_ONCE(ret == 0);
			SetPageError(page);
			break;
		}
	}

	if (ctx.bio) {
		submit_bio(ctx.bio);
		WARN_ON_ONCE(!ctx.cur_page_in_bio);
	} else {
		WARN_ON_ONCE(ctx.cur_page_in_bio);
		unlock_page(page);
	}

	/*
	 * Just like mpage_readpages and block_read_full_page we always
	 * return 0 and just mark the page as PageError on errors.  This
	 * should be cleaned up all through the stack eventually.
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(iomap_readpage);
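
/*
 * Sketch of how a filesystem might wire this up (example_* names are
 * hypothetical and not part of this file):
 *
 *	static int example_readpage(struct file *file, struct page *page)
 *	{
 *		return iomap_readpage(page, &example_iomap_ops);
 *	}
 *
 *	const struct address_space_operations example_aops = {
 *		.readpage	= example_readpage,
 *	};
 */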

static struct page *
iomap_next_page(struct inode *inode, struct list_head *pages, loff_t pos,
		loff_t length, loff_t *done)
{
	while (!list_empty(pages)) {
		struct page *page = lru_to_page(pages);

		if (page_offset(page) >= (u64)pos + length)
			break;

		list_del(&page->lru);
		if (!add_to_page_cache_lru(page, inode->i_mapping, page->index,
				GFP_NOFS))
			return page;

		/*
		 * If we already have a page in the page cache at index we are
		 * done.  Upper layers don't care if it is uptodate after the
		 * readpages call itself as every page gets checked again once
		 * actually needed in the normal read path.
		 */
		*done += PAGE_SIZE;
		put_page(page);
	}

	return NULL;
}

static loff_t
iomap_readpages_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct iomap_readpage_ctx *ctx = data;
	loff_t done, ret;

	for (done = 0; done < length; done += ret) {
		if (ctx->cur_page && offset_in_page(pos + done) == 0) {
			if (!ctx->cur_page_in_bio)
				unlock_page(ctx->cur_page);
			put_page(ctx->cur_page);
			ctx->cur_page = NULL;
		}
		if (!ctx->cur_page) {
			ctx->cur_page = iomap_next_page(inode, ctx->pages,
					pos, length, &done);
			if (!ctx->cur_page)
				break;
			ctx->cur_page_in_bio = false;
		}
		ret = iomap_readpage_actor(inode, pos + done, length - done,
				ctx, iomap);
	}

	return done;
}

int
iomap_readpages(struct address_space *mapping, struct list_head *pages,
		unsigned nr_pages, const struct iomap_ops *ops)
{
	struct iomap_readpage_ctx ctx = {
		.pages		= pages,
		.is_readahead	= true,
	};
	loff_t pos = page_offset(list_entry(pages->prev, struct page, lru));
	loff_t last = page_offset(list_entry(pages->next, struct page, lru));
	loff_t length = last - pos + PAGE_SIZE, ret = 0;

	trace_iomap_readpages(mapping->host, nr_pages);

	while (length > 0) {
		ret = iomap_apply(mapping->host, pos, length, 0, ops,
				&ctx, iomap_readpages_actor);
		if (ret <= 0) {
			WARN_ON_ONCE(ret == 0);
			goto done;
		}
		pos += ret;
		length -= ret;
	}
	ret = 0;
done:
	if (ctx.bio)
		submit_bio(ctx.bio);
	if (ctx.cur_page) {
		if (!ctx.cur_page_in_bio)
			unlock_page(ctx.cur_page);
		put_page(ctx.cur_page);
	}

	/*
	 * Check that we didn't lose a page due to the arcane calling
	 * conventions.
	 */
	WARN_ON_ONCE(!ret && !list_empty(ctx.pages));
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_readpages);

/*
 * iomap_is_partially_uptodate checks whether blocks within a page are
 * uptodate or not.
 *
 * Returns true if all blocks which correspond to the file portion
 * we want to read within the page are uptodate.
 */
int
iomap_is_partially_uptodate(struct page *page, unsigned long from,
		unsigned long count)
{
	struct iomap_page *iop = to_iomap_page(page);
	struct inode *inode = page->mapping->host;
	unsigned len, first, last;
	unsigned i;

	/* Limit range to one page */
	len = min_t(unsigned, PAGE_SIZE - from, count);

	/* First and last blocks in range within page */
	first = from >> inode->i_blkbits;
	last = (from + len - 1) >> inode->i_blkbits;

	if (iop) {
		for (i = first; i <= last; i++)
			if (!test_bit(i, iop->uptodate))
				return 0;
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);

int
iomap_releasepage(struct page *page, gfp_t gfp_mask)
{
	trace_iomap_releasepage(page->mapping->host, page, 0, 0);

	/*
	 * mm accommodates an old ext3 case where clean pages might not have
	 * had the dirty bit cleared.  Thus, it can send actual dirty pages to
	 * ->releasepage() via shrink_active_list(); skip those here.
	 */
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	iomap_page_release(page);
	return 1;
}
EXPORT_SYMBOL_GPL(iomap_releasepage);

void
iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
{
	trace_iomap_invalidatepage(page->mapping->host, page, offset, len);

	/*
	 * If we are invalidating the entire page, clear the dirty state from
	 * it and release it to avoid unnecessary buildup of the LRU.
	 */
	if (offset == 0 && len == PAGE_SIZE) {
		WARN_ON_ONCE(PageWriteback(page));
		cancel_dirty_page(page);
		iomap_page_release(page);
	}
}
EXPORT_SYMBOL_GPL(iomap_invalidatepage);

#ifdef CONFIG_MIGRATION
int
iomap_migrate_page(struct address_space *mapping, struct page *newpage,
		struct page *page, enum migrate_mode mode)
{
	int ret;

	ret = migrate_page_move_mapping(mapping, newpage, page, 0);
	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	if (page_has_private(page)) {
		ClearPagePrivate(page);
		get_page(newpage);
		set_page_private(newpage, page_private(page));
		set_page_private(page, 0);
		put_page(page);
		SetPagePrivate(newpage);
	}

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL_GPL(iomap_migrate_page);
#endif /* CONFIG_MIGRATION */
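
/*
 * A filesystem using these helpers would typically point its
 * address_space_operations directly at them, e.g. (illustrative):
 *
 *	.releasepage	= iomap_releasepage,
 *	.invalidatepage	= iomap_invalidatepage,
 *	.migratepage	= iomap_migrate_page,
 */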

static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
	loff_t i_size = i_size_read(inode);

	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing inode size.
	 */
	if (pos + len > i_size)
		truncate_pagecache_range(inode, max(pos, i_size), pos + len);
}

static int
iomap_read_page_sync(struct inode *inode, loff_t block_start, struct page *page,
		unsigned poff, unsigned plen, unsigned from, unsigned to,
		struct iomap *iomap)
{
	struct bio_vec bvec;
	struct bio bio;

	if (iomap_block_needs_zeroing(inode, iomap, block_start)) {
		zero_user_segments(page, poff, from, to, poff + plen);
		iomap_set_range_uptodate(page, poff, plen);
		return 0;
	}

	bio_init(&bio, &bvec, 1);
	bio.bi_opf = REQ_OP_READ;
	bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
	bio_set_dev(&bio, iomap->bdev);
	__bio_add_page(&bio, page, plen, poff);
	return submit_bio_wait(&bio);
}

static int
__iomap_write_begin(struct inode *inode, loff_t pos, unsigned len,
		struct page *page, struct iomap *iomap)
{
	struct iomap_page *iop = iomap_page_create(inode, page);
	loff_t block_size = i_blocksize(inode);
	loff_t block_start = pos & ~(block_size - 1);
	loff_t block_end = (pos + len + block_size - 1) & ~(block_size - 1);
	unsigned from = offset_in_page(pos), to = from + len, poff, plen;
	int status;

	if (PageUptodate(page))
		return 0;

	do {
		iomap_adjust_read_range(inode, iop, &block_start,
				block_end - block_start, &poff, &plen);
		if (plen == 0)
			break;

		if ((from > poff && from < poff + plen) ||
		    (to > poff && to < poff + plen)) {
			status = iomap_read_page_sync(inode, block_start, page,
					poff, plen, from, to, iomap);
			if (status)
				return status;
		}

	} while ((block_start += plen) < block_end);

	return 0;
}
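
/*
 * Worked example (illustrative): for a 50-byte write at pos = 100 with
 * 1024-byte blocks on a !Uptodate page, block_start = 0 and block_end =
 * 1024; both 'from' (100) and 'to' (150) land inside the block, so the
 * surrounding block is read in (or zeroed for a hole or fresh extent)
 * before the copy overwrites the middle of it.
 */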

static int
iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, struct iomap *iomap)
{
	const struct iomap_page_ops *page_ops = iomap->page_ops;
	pgoff_t index = pos >> PAGE_SHIFT;
	struct page *page;
	int status = 0;

	BUG_ON(pos + len > iomap->offset + iomap->length);

	if (fatal_signal_pending(current))
		return -EINTR;

	if (page_ops && page_ops->page_prepare) {
		status = page_ops->page_prepare(inode, pos, len, iomap);
		if (status)
			return status;
	}

	page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
	if (!page) {
		status = -ENOMEM;
		goto out_no_page;
	}

	if (iomap->type == IOMAP_INLINE)
		iomap_read_inline_data(inode, page, iomap);
	else if (iomap->flags & IOMAP_F_BUFFER_HEAD)
		status = __block_write_begin_int(page, pos, len, NULL, iomap);
	else
		status = __iomap_write_begin(inode, pos, len, page, iomap);

	if (unlikely(status))
		goto out_unlock;

	*pagep = page;
	return 0;

out_unlock:
	unlock_page(page);
	put_page(page);
	iomap_write_failed(inode, pos, len);

out_no_page:
	if (page_ops && page_ops->page_done)
		page_ops->page_done(inode, pos, 0, NULL, iomap);
	return status;
}

int
iomap_set_page_dirty(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int newly_dirty;

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	/*
	 * Lock out page->mem_cgroup migration to keep PageDirty
	 * synchronized with per-memcg dirty page counters.
	 */
	lock_page_memcg(page);
	newly_dirty = !TestSetPageDirty(page);
	if (newly_dirty)
		__set_page_dirty(page, mapping, 0);
	unlock_page_memcg(page);

	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return newly_dirty;
}
EXPORT_SYMBOL_GPL(iomap_set_page_dirty);

static int
__iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
		unsigned copied, struct page *page, struct iomap *iomap)
{
	flush_dcache_page(page);

	/*
	 * The blocks that were entirely written will now be uptodate, so we
	 * don't have to worry about a readpage reading them and overwriting a
	 * partial write.  However, if we have encountered a short write and
	 * only partially written into a block, it will not be marked uptodate,
	 * so a readpage might come in and destroy our partial write.
	 *
	 * Do the simplest thing and just treat any short write to a
	 * non-uptodate page as a zero-length write, and force the caller to
	 * redo the whole thing.
	 */
	if (unlikely(copied < len && !PageUptodate(page)))
		return 0;
	iomap_set_range_uptodate(page, offset_in_page(pos), len);
	iomap_set_page_dirty(page);
	return copied;
}

static int
iomap_write_end_inline(struct inode *inode, struct page *page,
		struct iomap *iomap, loff_t pos, unsigned copied)
{
	void *addr;

	WARN_ON_ONCE(!PageUptodate(page));
	BUG_ON(pos + copied > PAGE_SIZE - offset_in_page(iomap->inline_data));

	addr = kmap_atomic(page);
	memcpy(iomap->inline_data + pos, addr + pos, copied);
	kunmap_atomic(addr);

	mark_inode_dirty(inode);
	return copied;
}

static int
iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
		unsigned copied, struct page *page, struct iomap *iomap)
{
	const struct iomap_page_ops *page_ops = iomap->page_ops;
	loff_t old_size = inode->i_size;
	int ret;

	if (iomap->type == IOMAP_INLINE) {
		ret = iomap_write_end_inline(inode, page, iomap, pos, copied);
	} else if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
		ret = block_write_end(NULL, inode->i_mapping, pos, len, copied,
				page, NULL);
	} else {
		ret = __iomap_write_end(inode, pos, len, copied, page, iomap);
	}

	/*
	 * Update the in-memory inode size after copying the data into the page
	 * cache.  It's up to the file system to write the updated size to disk,
	 * preferably after I/O completion so that no stale data is exposed.
	 */
	if (pos + ret > old_size) {
		i_size_write(inode, pos + ret);
		iomap->flags |= IOMAP_F_SIZE_CHANGED;
	}
	unlock_page(page);

	if (old_size < pos)
		pagecache_isize_extended(inode, old_size, pos);
	if (page_ops && page_ops->page_done)
		page_ops->page_done(inode, pos, ret, page, iomap);
	put_page(page);

	if (ret < len)
		iomap_write_failed(inode, pos, len);
	return ret;
}

static loff_t
iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct iov_iter *i = data;
	long status = 0;
	ssize_t written = 0;
	unsigned int flags = AOP_FLAG_NOFS;

	do {
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */

		offset = offset_in_page(pos);
		bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_count(i));
again:
		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		status = iomap_write_begin(inode, pos, bytes, flags, &page,
				iomap);
		if (unlikely(status))
			break;

		if (mapping_writably_mapped(inode->i_mapping))
			flush_dcache_page(page);

		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);

		flush_dcache_page(page);

		status = iomap_write_end(inode, pos, bytes, copied, page,
				iomap);
		if (unlikely(status < 0))
			break;
		copied = status;

		cond_resched();

		iov_iter_advance(i, copied);
		if (unlikely(copied == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fall back here, we could livelock
			 * because not all segments in the iov can be copied at
			 * once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_single_seg_count(i));
			goto again;
		}
		pos += copied;
		written += copied;
		length -= copied;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (iov_iter_count(i) && length);

	return written ? written : status;
}

ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, written = 0;

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter),
				IOMAP_WRITE, ops, iter, iomap_write_actor);
		if (ret <= 0)
			break;
		pos += ret;
		written += ret;
	}

	return written ? written : ret;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
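
/*
 * Sketch of a caller (hypothetical names; a real ->write_iter also handles
 * O_DIRECT fallback and syncing, compare xfs_file_write_iter):
 *
 *	static ssize_t example_write_iter(struct kiocb *iocb,
 *			struct iov_iter *from)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock(inode);
 *		ret = generic_write_checks(iocb, from);
 *		if (ret > 0)
 *			ret = iomap_file_buffered_write(iocb, from,
 *					&example_iomap_ops);
 *		inode_unlock(inode);
 *		return ret;
 *	}
 */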

static struct page *
__iomap_read_page(struct inode *inode, loff_t offset)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = read_mapping_page(mapping, offset >> PAGE_SHIFT, NULL);
	if (IS_ERR(page))
		return page;
	if (!PageUptodate(page)) {
		put_page(page);
		return ERR_PTR(-EIO);
	}
	return page;
}

static loff_t
iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	long status = 0;
	loff_t written = 0;

	do {
		struct page *page, *rpage;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */

		offset = offset_in_page(pos);
		bytes = min_t(loff_t, PAGE_SIZE - offset, length);

		rpage = __iomap_read_page(inode, pos);
		if (IS_ERR(rpage))
			return PTR_ERR(rpage);

		status = iomap_write_begin(inode, pos, bytes,
					   AOP_FLAG_NOFS, &page, iomap);
		put_page(rpage);
		if (unlikely(status))
			return status;

		WARN_ON_ONCE(!PageUptodate(page));

		status = iomap_write_end(inode, pos, bytes, bytes, page, iomap);
		if (unlikely(status <= 0)) {
			if (WARN_ON_ONCE(status == 0))
				return -EIO;
			return status;
		}

		cond_resched();

		pos += status;
		written += status;
		length -= status;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (length);

	return written;
}

int
iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len) {
		ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
				iomap_dirty_actor);
		if (ret <= 0)
			return ret;
		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_file_dirty);

static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
		unsigned bytes, struct iomap *iomap)
{
	struct page *page;
	int status;

	status = iomap_write_begin(inode, pos, bytes, AOP_FLAG_NOFS, &page,
				   iomap);
	if (status)
		return status;

	zero_user(page, offset, bytes);
	mark_page_accessed(page);

	return iomap_write_end(inode, pos, bytes, bytes, page, iomap);
}

static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
		struct iomap *iomap)
{
	return __dax_zero_page_range(iomap->bdev, iomap->dax_dev,
			iomap_sector(iomap, pos & PAGE_MASK), offset, bytes);
}

static loff_t
iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
		void *data, struct iomap *iomap)
{
	bool *did_zero = data;
	loff_t written = 0;
	int status;

	/* already zeroed?  we're done. */
	if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
		return count;

	do {
		unsigned offset, bytes;

		offset = offset_in_page(pos);
		bytes = min_t(loff_t, PAGE_SIZE - offset, count);

		if (IS_DAX(inode))
			status = iomap_dax_zero(pos, offset, bytes, iomap);
		else
			status = iomap_zero(inode, pos, offset, bytes, iomap);
		if (status < 0)
			return status;

		pos += bytes;
		count -= bytes;
		written += bytes;
		if (did_zero)
			*did_zero = true;
	} while (count > 0);

	return written;
}

int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len > 0) {
		ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
				ops, did_zero, iomap_zero_range_actor);
		if (ret <= 0)
			return ret;

		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_zero_range);

int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops)
{
	unsigned int blocksize = i_blocksize(inode);
	unsigned int off = pos & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!off)
		return 0;
	return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);
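
/*
 * Typical use (illustrative): on truncate, a filesystem zeroes the tail of
 * the new last block before updating i_size, e.g.:
 *
 *	error = iomap_truncate_page(inode, newsize, &did_zero,
 *			&example_iomap_ops);
 */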

static loff_t
iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct page *page = data;
	int ret;

	if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
		ret = __block_write_begin_int(page, pos, length, NULL, iomap);
		if (ret)
			return ret;
		block_commit_write(page, 0, length);
	} else {
		WARN_ON_ONCE(!PageUptodate(page));
		iomap_page_create(inode, page);
		set_page_dirty(page);
	}

	return length;
}

vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	unsigned long length;
	loff_t offset, size;
	ssize_t ret;

	lock_page(page);
	size = i_size_read(inode);
	if ((page->mapping != inode->i_mapping) ||
	    (page_offset(page) > size)) {
		/* We overload EFAULT to mean page got truncated */
		ret = -EFAULT;
		goto out_unlock;
	}

	/* page is wholly or partially inside EOF */
	if (((page->index + 1) << PAGE_SHIFT) > size)
		length = offset_in_page(size);
	else
		length = PAGE_SIZE;

	offset = page_offset(page);
	while (length > 0) {
		ret = iomap_apply(inode, offset, length,
				IOMAP_WRITE | IOMAP_FAULT, ops, page,
				iomap_page_mkwrite_actor);
		if (unlikely(ret <= 0))
			goto out_unlock;
		offset += ret;
		length -= ret;
	}

	wait_for_stable_page(page);
	return VM_FAULT_LOCKED;
out_unlock:
	unlock_page(page);
	return block_page_mkwrite_return(ret);
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
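
/*
 * Sketch of the fault-side wiring (hypothetical names), hooked up via the
 * file's vm_operations_struct ->page_mkwrite:
 *
 *	static vm_fault_t example_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		return iomap_page_mkwrite(vmf, &example_iomap_ops);
 *	}
 */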

static void
iomap_finish_page_writeback(struct inode *inode, struct page *page,
		int error)
{
	struct iomap_page *iop = to_iomap_page(page);

	if (error) {
		SetPageError(page);
		mapping_set_error(inode->i_mapping, -EIO);
	}

	WARN_ON_ONCE(i_blocksize(inode) < PAGE_SIZE && !iop);
	WARN_ON_ONCE(iop && atomic_read(&iop->write_count) <= 0);

	if (!iop || atomic_dec_and_test(&iop->write_count))
		end_page_writeback(page);
}

/*
 * We're now finished for good with this ioend structure.  Update the page
 * state, release holds on bios, and finally free up memory.  Do not use the
 * ioend after this.
 */
static void
iomap_finish_ioend(struct iomap_ioend *ioend, int error)
{
	struct inode *inode = ioend->io_inode;
	struct bio *bio = &ioend->io_inline_bio;
	struct bio *last = ioend->io_bio, *next;
	u64 start = bio->bi_iter.bi_sector;
	bool quiet = bio_flagged(bio, BIO_QUIET);

	for (bio = &ioend->io_inline_bio; bio; bio = next) {
		struct bio_vec *bv;
		struct bvec_iter_all iter_all;

		/*
		 * For the last bio, bi_private points to the ioend, so we
		 * need to explicitly end the iteration here.
		 */
		if (bio == last)
			next = NULL;
		else
			next = bio->bi_private;

		/* walk each page on bio, ending page IO on them */
		bio_for_each_segment_all(bv, bio, iter_all)
			iomap_finish_page_writeback(inode, bv->bv_page, error);
		bio_put(bio);
	}

	if (unlikely(error && !quiet)) {
		printk_ratelimited(KERN_ERR
			"%s: writeback error on sector %llu",
			inode->i_sb->s_id, start);
	}
}

void
iomap_finish_ioends(struct iomap_ioend *ioend, int error)
{
	struct list_head tmp;

	list_replace_init(&ioend->io_list, &tmp);
	iomap_finish_ioend(ioend, error);

	while (!list_empty(&tmp)) {
		ioend = list_first_entry(&tmp, struct iomap_ioend, io_list);
		list_del_init(&ioend->io_list);
		iomap_finish_ioend(ioend, error);
	}
}
EXPORT_SYMBOL_GPL(iomap_finish_ioends);

/*
 * We can merge two adjacent ioends if they have the same set of work to do.
 */
static bool
iomap_ioend_can_merge(struct iomap_ioend *ioend, struct iomap_ioend *next)
{
	if (ioend->io_bio->bi_status != next->io_bio->bi_status)
		return false;
	if ((ioend->io_flags & IOMAP_F_SHARED) ^
	    (next->io_flags & IOMAP_F_SHARED))
		return false;
	if ((ioend->io_type == IOMAP_UNWRITTEN) ^
	    (next->io_type == IOMAP_UNWRITTEN))
		return false;
	if (ioend->io_offset + ioend->io_size != next->io_offset)
		return false;
	return true;
}

void
iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends,
		void (*merge_private)(struct iomap_ioend *ioend,
				struct iomap_ioend *next))
{
	struct iomap_ioend *next;

	INIT_LIST_HEAD(&ioend->io_list);

	while ((next = list_first_entry_or_null(more_ioends, struct iomap_ioend,
			io_list))) {
		if (!iomap_ioend_can_merge(ioend, next))
			break;
		list_move_tail(&next->io_list, &ioend->io_list);
		ioend->io_size += next->io_size;
		if (next->io_private && merge_private)
			merge_private(ioend, next);
	}
}
EXPORT_SYMBOL_GPL(iomap_ioend_try_merge);

static int
iomap_ioend_compare(void *priv, struct list_head *a, struct list_head *b)
{
	struct iomap_ioend *ia = container_of(a, struct iomap_ioend, io_list);
	struct iomap_ioend *ib = container_of(b, struct iomap_ioend, io_list);

	if (ia->io_offset < ib->io_offset)
		return -1;
	if (ia->io_offset > ib->io_offset)
		return 1;
	return 0;
}

void
iomap_sort_ioends(struct list_head *ioend_list)
{
	list_sort(NULL, ioend_list, iomap_ioend_compare);
}
EXPORT_SYMBOL_GPL(iomap_sort_ioends);
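
/*
 * Completion-side usage sketch (illustrative; XFS follows this pattern):
 * collect completed ioends on a local list, then sort, merge, and finish:
 *
 *	iomap_sort_ioends(&completion_list);
 *	while ((ioend = list_first_entry_or_null(&completion_list,
 *			struct iomap_ioend, io_list))) {
 *		list_del_init(&ioend->io_list);
 *		iomap_ioend_try_merge(ioend, &completion_list, NULL);
 *		iomap_finish_ioends(ioend, error);
 *	}
 */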

static void iomap_writepage_end_bio(struct bio *bio)
{
	struct iomap_ioend *ioend = bio->bi_private;

	iomap_finish_ioend(ioend, blk_status_to_errno(bio->bi_status));
}

/*
 * Submit the final bio for an ioend.
 *
 * If @error is non-zero, it means that we have a situation where some part of
 * the submission process has failed after we have marked pages for writeback
 * and unlocked them.  In this situation, we need to fail the bio instead of
 * submitting it.  This typically only happens on a filesystem shutdown.
 */
static int
iomap_submit_ioend(struct iomap_writepage_ctx *wpc, struct iomap_ioend *ioend,
		int error)
{
	ioend->io_bio->bi_private = ioend;
	ioend->io_bio->bi_end_io = iomap_writepage_end_bio;

	if (wpc->ops->prepare_ioend)
		error = wpc->ops->prepare_ioend(ioend, error);
	if (error) {
		/*
		 * If we are failing the IO now, just mark the ioend with an
		 * error and finish it.  This will run IO completion immediately
		 * as there is only one reference to the ioend at this point in
		 * time.
		 */
		ioend->io_bio->bi_status = errno_to_blk_status(error);
		bio_endio(ioend->io_bio);
		return error;
	}

	submit_bio(ioend->io_bio);
	return 0;
}

static struct iomap_ioend *
iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc,
		loff_t offset, sector_t sector, struct writeback_control *wbc)
{
	struct iomap_ioend *ioend;
	struct bio *bio;

	bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &iomap_ioend_bioset);
	bio_set_dev(bio, wpc->iomap.bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
	bio->bi_write_hint = inode->i_write_hint;
	wbc_init_bio(wbc, bio);

	ioend = container_of(bio, struct iomap_ioend, io_inline_bio);
	INIT_LIST_HEAD(&ioend->io_list);
	ioend->io_type = wpc->iomap.type;
	ioend->io_flags = wpc->iomap.flags;
	ioend->io_inode = inode;
	ioend->io_size = 0;
	ioend->io_offset = offset;
	ioend->io_private = NULL;
	ioend->io_bio = bio;
	return ioend;
}

/*
 * Allocate a new bio, and chain the old bio to the new one.
 *
 * Note that we have to perform the chaining in this unintuitive order
 * so that the bi_private linkage is set up in the right direction for the
 * traversal in iomap_finish_ioend().
 */
static struct bio *
iomap_chain_bio(struct bio *prev)
{
	struct bio *new;

	new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
	bio_copy_dev(new, prev);	/* also copies over blkcg information */
	new->bi_iter.bi_sector = bio_end_sector(prev);
	new->bi_opf = prev->bi_opf;
	new->bi_write_hint = prev->bi_write_hint;

	bio_chain(prev, new);
	bio_get(prev);		/* for iomap_finish_ioend */
	submit_bio(prev);
	return new;
}

static bool
iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t offset,
		sector_t sector)
{
	if ((wpc->iomap.flags & IOMAP_F_SHARED) !=
	    (wpc->ioend->io_flags & IOMAP_F_SHARED))
		return false;
	if (wpc->iomap.type != wpc->ioend->io_type)
		return false;
	if (offset != wpc->ioend->io_offset + wpc->ioend->io_size)
		return false;
	if (sector != bio_end_sector(wpc->ioend->io_bio))
		return false;
	return true;
}

/*
 * Test to see if we have an existing ioend structure that we could append to
 * first; otherwise finish off the current ioend and start another.
 */
static void
iomap_add_to_ioend(struct inode *inode, loff_t offset, struct page *page,
		struct iomap_page *iop, struct iomap_writepage_ctx *wpc,
		struct writeback_control *wbc, struct list_head *iolist)
{
	sector_t sector = iomap_sector(&wpc->iomap, offset);
	unsigned len = i_blocksize(inode);
	unsigned poff = offset & (PAGE_SIZE - 1);
	bool merged, same_page = false;

	if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, offset, sector)) {
		if (wpc->ioend)
			list_add(&wpc->ioend->io_list, iolist);
		wpc->ioend = iomap_alloc_ioend(inode, wpc, offset, sector, wbc);
	}

	merged = __bio_try_merge_page(wpc->ioend->io_bio, page, len, poff,
			&same_page);
	if (iop && !same_page)
		atomic_inc(&iop->write_count);

	if (!merged) {
		if (bio_full(wpc->ioend->io_bio, len)) {
			wpc->ioend->io_bio =
				iomap_chain_bio(wpc->ioend->io_bio);
		}
		bio_add_page(wpc->ioend->io_bio, page, len, poff);
	}

	wpc->ioend->io_size += len;
	wbc_account_cgroup_owner(wbc, page, len);
}

/*
 * We implement an immediate ioend submission policy here to avoid needing to
 * chain multiple ioends and hence nest mempool allocations which can violate
 * the forward progress guarantees we need to provide.  The current ioend we
 * are adding blocks to is cached on the writepage context, and if the new
 * block does not append to the cached ioend it will create a new ioend and
 * cache that instead.
 *
 * If a new ioend is created and cached, the old ioend is returned and queued
 * locally for submission once the entire page is processed or an error has
 * been detected.  While ioends are submitted immediately after they are
 * completed, batching optimisations are provided by higher level block
 * plugging.
 *
 * At the end of a writeback pass, there will be a cached ioend remaining on
 * the writepage context that the caller will need to submit.
 */
static int
iomap_writepage_map(struct iomap_writepage_ctx *wpc,
		struct writeback_control *wbc, struct inode *inode,
		struct page *page, u64 end_offset)
{
	struct iomap_page *iop = to_iomap_page(page);
	struct iomap_ioend *ioend, *next;
	unsigned len = i_blocksize(inode);
	u64 file_offset; /* file offset of page */
	int error = 0, count = 0, i;
	LIST_HEAD(submit_list);

	WARN_ON_ONCE(i_blocksize(inode) < PAGE_SIZE && !iop);
	WARN_ON_ONCE(iop && atomic_read(&iop->write_count) != 0);

	/*
	 * Walk through the page to find areas to write back.  If we run off
	 * the end of the current map or find the current map invalid, grab a
	 * new one.
	 */
	for (i = 0, file_offset = page_offset(page);
	     i < (PAGE_SIZE >> inode->i_blkbits) && file_offset < end_offset;
	     i++, file_offset += len) {
		if (iop && !test_bit(i, iop->uptodate))
			continue;

		error = wpc->ops->map_blocks(wpc, inode, file_offset);
		if (error)
			break;
		if (WARN_ON_ONCE(wpc->iomap.type == IOMAP_INLINE))
			continue;
		if (wpc->iomap.type == IOMAP_HOLE)
			continue;
		iomap_add_to_ioend(inode, file_offset, page, iop, wpc, wbc,
				 &submit_list);
		count++;
	}

	WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list));
	WARN_ON_ONCE(!PageLocked(page));
	WARN_ON_ONCE(PageWriteback(page));

	/*
	 * We cannot cancel the ioend directly here on error.  We may have
	 * already set other pages under writeback and hence we have to run I/O
	 * completion to mark the error state of the pages under writeback
	 * appropriately.
	 */
	if (unlikely(error)) {
		if (!count) {
			/*
			 * If the current page hasn't been added to an ioend,
			 * it won't be affected by I/O completions and we must
			 * discard and unlock it right here.
			 */
			if (wpc->ops->discard_page)
				wpc->ops->discard_page(page);
			ClearPageUptodate(page);
			unlock_page(page);
			goto done;
		}

		/*
		 * If the page was not fully cleaned, we need to ensure that the
		 * higher layers come back to it correctly.  That means we need
		 * to keep the page dirty, and for WB_SYNC_ALL writeback we need
		 * to ensure the PAGECACHE_TAG_TOWRITE index mark is not removed
		 * so another attempt to write this page in this writeback sweep
		 * will be made.
		 */
		set_page_writeback_keepwrite(page);
	} else {
		clear_page_dirty_for_io(page);
		set_page_writeback(page);
	}

	unlock_page(page);

	/*
	 * Preserve the original error if there was one; otherwise catch
	 * submission errors here and propagate into subsequent ioend
	 * submissions.
	 */
	list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
		int error2;

		list_del_init(&ioend->io_list);
		error2 = iomap_submit_ioend(wpc, ioend, error);
		if (error2 && !error)
			error = error2;
	}

	/*
	 * We can end up here with no error and nothing to write only if we
	 * race with a partial page truncate on a sub-page block sized
	 * filesystem.
	 */
	if (!count)
		end_page_writeback(page);
done:
	mapping_set_error(page->mapping, error);
	return error;
}

/*
 * Write out a dirty page.
 *
 * For delalloc space on the page we need to allocate space and flush it.
 * For unwritten space on the page we need to start the conversion to
 * regular allocated space.
 */
static int
iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
{
	struct iomap_writepage_ctx *wpc = data;
	struct inode *inode = page->mapping->host;
	pgoff_t end_index;
	u64 end_offset;
	loff_t offset;

	trace_iomap_writepage(inode, page, 0, 0);

	/*
	 * Refuse to write the page out if we are called from reclaim context.
	 *
	 * This avoids stack overflows when called from deeply used stacks in
	 * random callers for direct reclaim or memcg reclaim.  We explicitly
	 * allow reclaim from kswapd as the stack usage there is relatively
	 * low.
	 *
	 * This should never happen except in the case of a VM regression, so
	 * warn about it.
	 */
	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
			PF_MEMALLOC))
		goto redirty;

	/*
	 * Given that we do not allow direct reclaim to call us, we should
	 * never be called in a recursive filesystem reclaim context.
	 */
	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC_NOFS))
		goto redirty;

	/*
	 * Is this page beyond the end of the file?
	 *
	 * The page index is less than the end_index, adjust the end_offset
	 * to the highest offset that this page should represent.
	 * -----------------------------------------------------
	 * |			file mapping	       | <EOF> |
	 * -----------------------------------------------------
	 * | Page ... | Page N-2 | Page N-1 |  Page N  |       |
	 * ^--------------------------------^----------|--------
	 * |     desired writeback range    |      see else    |
	 * ---------------------------------^------------------|
	 */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_SHIFT;
	if (page->index < end_index)
		end_offset = (loff_t)(page->index + 1) << PAGE_SHIFT;
	else {
		/*
		 * Check whether the page to write out is beyond or straddles
		 * i_size or not.
		 * -------------------------------------------------------
		 * |		file mapping		        | <EOF>  |
		 * -------------------------------------------------------
		 * | Page ... | Page N-2 | Page N-1 |  Page N   | Beyond |
		 * ^--------------------------------^-----------|---------
		 * |				    |      Straddles     |
		 * ---------------------------------^-----------|--------|
		 */
		unsigned offset_into_page = offset & (PAGE_SIZE - 1);

		/*
		 * Skip the page if it is fully outside i_size, e.g. due to a
		 * truncate operation that is in progress.  We must redirty the
		 * page so that reclaim stops reclaiming it.  Otherwise
		 * iomap_vm_releasepage() is called on it and gets confused.
		 *
		 * Note that the end_index is unsigned long.  It would overflow
		 * if the given offset is greater than 16TB on a 32-bit system
		 * and if we do check the page is fully outside i_size via
		 * "if (page->index >= end_index + 1)", as "end_index + 1"
		 * would evaluate to 0.  Hence this page would be redirtied and
		 * written out repeatedly, which would result in an infinite
		 * loop; the user program performing this operation would hang.
		 * Instead, we can detect this situation by checking if the
		 * page is totally beyond i_size or if its offset is just equal
		 * to the EOF.
		 */
		if (page->index > end_index ||
		    (page->index == end_index && offset_into_page == 0))
			goto redirty;

		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a file
		 * that is not a multiple of the page size, the remaining
		 * memory is zeroed when mapped, and writes to that region are
		 * not written out to the file."
		 */
		zero_user_segment(page, offset_into_page, PAGE_SIZE);

		/* Adjust the end_offset to the end of file */
		end_offset = offset;
	}

	return iomap_writepage_map(wpc, wbc, inode, page, end_offset);

redirty:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}

int
iomap_writepage(struct page *page, struct writeback_control *wbc,
		struct iomap_writepage_ctx *wpc,
		const struct iomap_writeback_ops *ops)
{
	int ret;

	wpc->ops = ops;
	ret = iomap_do_writepage(page, wbc, wpc);
	if (!wpc->ioend)
		return ret;
	return iomap_submit_ioend(wpc, wpc->ioend, ret);
}
EXPORT_SYMBOL_GPL(iomap_writepage);

int
iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
		struct iomap_writepage_ctx *wpc,
		const struct iomap_writeback_ops *ops)
{
	int ret;

	wpc->ops = ops;
	ret = write_cache_pages(mapping, wbc, iomap_do_writepage, wpc);
	if (!wpc->ioend)
		return ret;
	return iomap_submit_ioend(wpc, wpc->ioend, ret);
}
EXPORT_SYMBOL_GPL(iomap_writepages);
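
/*
 * Writeback wiring sketch (hypothetical example_writeback_ops providing
 * ->map_blocks; compare xfs_vm_writepages):
 *
 *	static int example_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		struct iomap_writepage_ctx wpc = { };
 *
 *		return iomap_writepages(mapping, wbc, &wpc,
 *				&example_writeback_ops);
 *	}
 */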

static int __init iomap_init(void)
{
	return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
			   offsetof(struct iomap_ioend, io_inline_bio),
			   BIOSET_NEED_BVECS);
}
fs_initcall(iomap_init);