// SPDX-License-Identifier: GPL-2.0-or-later
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <asm/byteorder.h>
#include <linux/swap.h>
#include <linux/pipe_fs_i.h>
#include <linux/mpage.h>
#include <linux/quotaops.h>
#include <linux/blkdev.h>
#include <linux/uio.h>
#include <linux/mm.h>

#define MLOG_MASK_PREFIX ML_FILE_IO
#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "aops.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "file.h"
#include "inode.h"
#include "journal.h"
#include "suballoc.h"
#include "super.h"
#include "symlink.h"
#include "refcounttree.h"
#include "ocfs2_trace.h"

#include "buffer_head_io.h"

static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock,
				   struct buffer_head *bh_result, int create)
{
	int err = -EIO;
	int status;
	struct ocfs2_dinode *fe = NULL;
	struct buffer_head *bh = NULL;
	struct buffer_head *buffer_cache_bh = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	void *kaddr;

	trace_ocfs2_symlink_get_block(
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			(unsigned long long)iblock, bh_result, create);

	BUG_ON(ocfs2_inode_is_fast_symlink(inode));

	if ((iblock << inode->i_sb->s_blocksize_bits) > PATH_MAX + 1) {
		mlog(ML_ERROR, "block offset > PATH_MAX: %llu",
		     (unsigned long long)iblock);
		goto bail;
	}

	status = ocfs2_read_inode_block(inode, &bh);
	if (status < 0) {
		err = status;
		mlog_errno(status);
		goto bail;
	}
	fe = (struct ocfs2_dinode *) bh->b_data;

	if ((u64)iblock >= ocfs2_clusters_to_blocks(inode->i_sb,
						    le32_to_cpu(fe->i_clusters))) {
		err = -ENOMEM;
		mlog(ML_ERROR, "block offset is outside the allocated size: "
		     "%llu\n", (unsigned long long)iblock);
		goto bail;
	}

	/* We don't use the page cache to create symlink data, so if
	 * need be, copy it over from the buffer cache. */
	if (!buffer_uptodate(bh_result) && ocfs2_inode_is_new(inode)) {
		u64 blkno = le64_to_cpu(fe->id2.i_list.l_recs[0].e_blkno) +
			    iblock;
		buffer_cache_bh = sb_getblk(osb->sb, blkno);
		if (!buffer_cache_bh) {
			err = -ENOMEM;
			mlog(ML_ERROR, "couldn't getblock for symlink!\n");
			goto bail;
		}

		/* we haven't locked out transactions, so a commit
		 * could've happened. Since we've got a reference on
		 * the bh, even if it commits while we're doing the
		 * copy, the data is still good. */
		if (buffer_jbd(buffer_cache_bh)
		    && ocfs2_inode_is_new(inode)) {
			kaddr = kmap_atomic(bh_result->b_page);
			if (!kaddr) {
				mlog(ML_ERROR, "couldn't kmap!\n");
				goto bail;
			}
			memcpy(kaddr + (bh_result->b_size * iblock),
			       buffer_cache_bh->b_data,
			       bh_result->b_size);
			kunmap_atomic(kaddr);
			set_buffer_uptodate(bh_result);
		}
		brelse(buffer_cache_bh);
	}

	map_bh(bh_result, inode->i_sb,
	       le64_to_cpu(fe->id2.i_list.l_recs[0].e_blkno) + iblock);

	err = 0;

bail:
	brelse(bh);

	return err;
}

static int ocfs2_lock_get_block(struct inode *inode, sector_t iblock,
				struct buffer_head *bh_result, int create)
{
	int ret = 0;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	down_read(&oi->ip_alloc_sem);
	ret = ocfs2_get_block(inode, iblock, bh_result, create);
	up_read(&oi->ip_alloc_sem);

	return ret;
}

int ocfs2_get_block(struct inode *inode, sector_t iblock,
		    struct buffer_head *bh_result, int create)
{
	int err = 0;
	unsigned int ext_flags;
	u64 max_blocks = bh_result->b_size >> inode->i_blkbits;
	u64 p_blkno, count, past_eof;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	trace_ocfs2_get_block((unsigned long long)OCFS2_I(inode)->ip_blkno,
			      (unsigned long long)iblock, bh_result, create);

	if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE)
		mlog(ML_NOTICE, "get_block on system inode 0x%p (%lu)\n",
		     inode, inode->i_ino);

	if (S_ISLNK(inode->i_mode)) {
		/* this always does I/O for some reason. */
		err = ocfs2_symlink_get_block(inode, iblock, bh_result, create);
		goto bail;
	}

	err = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno, &count,
					  &ext_flags);
	if (err) {
		mlog(ML_ERROR, "Error %d from get_blocks(0x%p, %llu, 1, "
		     "%llu, NULL)\n", err, inode, (unsigned long long)iblock,
		     (unsigned long long)p_blkno);
		goto bail;
	}

	if (max_blocks < count)
		count = max_blocks;

	/*
	 * ocfs2 never allocates in this function - the only time we
	 * need to use BH_New is when we're extending i_size on a file
	 * system which doesn't support holes, in which case BH_New
	 * allows __block_write_begin() to zero.
	 *
	 * If we see this on a sparse file system, then a truncate has
	 * raced us and removed the cluster. In this case, we clear
	 * the buffers dirty and uptodate bits and let the buffer code
	 * ignore it as a hole.
	 */
	if (create && p_blkno == 0 && ocfs2_sparse_alloc(osb)) {
		clear_buffer_dirty(bh_result);
		clear_buffer_uptodate(bh_result);
		goto bail;
	}

	/* Treat the unwritten extent as a hole for zeroing purposes. */
	if (p_blkno && !(ext_flags & OCFS2_EXT_UNWRITTEN))
		map_bh(bh_result, inode->i_sb, p_blkno);

	bh_result->b_size = count << inode->i_blkbits;

	if (!ocfs2_sparse_alloc(osb)) {
		if (p_blkno == 0) {
			err = -EIO;
			mlog(ML_ERROR,
			     "iblock = %llu p_blkno = %llu blkno=(%llu)\n",
			     (unsigned long long)iblock,
			     (unsigned long long)p_blkno,
			     (unsigned long long)OCFS2_I(inode)->ip_blkno);
			mlog(ML_ERROR, "Size %llu, clusters %u\n",
			     (unsigned long long)i_size_read(inode),
			     OCFS2_I(inode)->ip_clusters);
			goto bail;
		}
	}

	past_eof = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));

	trace_ocfs2_get_block_end((unsigned long long)OCFS2_I(inode)->ip_blkno,
				  (unsigned long long)past_eof);
	if (create && (iblock >= past_eof))
		set_buffer_new(bh_result);

bail:
	if (err < 0)
		err = -EIO;

	return err;
}

int ocfs2_read_inline_data(struct inode *inode, struct page *page,
			   struct buffer_head *di_bh)
{
	void *kaddr;
	loff_t size;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;

	if (!(le16_to_cpu(di->i_dyn_features) & OCFS2_INLINE_DATA_FL)) {
		ocfs2_error(inode->i_sb, "Inode %llu lost inline data flag\n",
			    (unsigned long long)OCFS2_I(inode)->ip_blkno);
		return -EROFS;
	}

	size = i_size_read(inode);

	if (size > PAGE_SIZE ||
	    size > ocfs2_max_inline_data_with_xattr(inode->i_sb, di)) {
		ocfs2_error(inode->i_sb,
			    "Inode %llu with inline data has bad size: %Lu\n",
			    (unsigned long long)OCFS2_I(inode)->ip_blkno,
			    (unsigned long long)size);
		return -EROFS;
	}

	kaddr = kmap_atomic(page);
	if (size)
		memcpy(kaddr, di->id2.i_data.id_data, size);
	/* Clear the remaining part of the page */
	memset(kaddr + size, 0, PAGE_SIZE - size);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);

	SetPageUptodate(page);

	return 0;
}

static int ocfs2_readpage_inline(struct inode *inode, struct page *page)
{
	int ret;
	struct buffer_head *di_bh = NULL;

	BUG_ON(!PageLocked(page));
	BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL));

	ret = ocfs2_read_inode_block(inode, &di_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_read_inline_data(inode, page, di_bh);
out:
	unlock_page(page);

	brelse(di_bh);
	return ret;
}

static int ocfs2_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	loff_t start = (loff_t)page->index << PAGE_SHIFT;
	int ret, unlock = 1;

	trace_ocfs2_readpage((unsigned long long)oi->ip_blkno,
			     (page ? page->index : 0));

	ret = ocfs2_inode_lock_with_page(inode, NULL, 0, page);
	if (ret != 0) {
		if (ret == AOP_TRUNCATED_PAGE)
			unlock = 0;
		mlog_errno(ret);
		goto out;
	}

	if (down_read_trylock(&oi->ip_alloc_sem) == 0) {
		/*
		 * Unlock the page and cycle ip_alloc_sem so that we don't
		 * busyloop waiting for ip_alloc_sem to unlock
		 */
		ret = AOP_TRUNCATED_PAGE;
		unlock_page(page);
		unlock = 0;
		down_read(&oi->ip_alloc_sem);
		up_read(&oi->ip_alloc_sem);
		goto out_inode_unlock;
	}

	/*
	 * i_size might have just been updated as we grabbed the meta lock. We
	 * might now be discovering a truncate that hit on another node.
	 * block_read_full_page->get_block freaks out if it is asked to read
	 * beyond the end of a file, so we check here. Callers
	 * (generic_file_read, vm_ops->fault) are clever enough to check i_size
	 * and notice that the page they just read isn't needed.
	 *
	 * XXX sys_readahead() seems to get that wrong?
	 */
	if (start >= i_size_read(inode)) {
		zero_user(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		ret = 0;
		goto out_alloc;
	}

	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
		ret = ocfs2_readpage_inline(inode, page);
	else
		ret = block_read_full_page(page, ocfs2_get_block);
	unlock = 0;

out_alloc:
	up_read(&oi->ip_alloc_sem);
out_inode_unlock:
	ocfs2_inode_unlock(inode, 0);
out:
	if (unlock)
		unlock_page(page);
	return ret;
}

/*
 * This is used only for read-ahead. Failures or difficult to handle
 * situations are safe to ignore.
 *
 * Right now, we don't bother with BH_Boundary - in-inode extent lists
 * are quite large (243 extents on 4k blocks), so most inodes don't
 * grow out to a tree. If need be, detecting boundary extents could
 * trivially be added in a future version of ocfs2_get_block().
 */
static int ocfs2_readpages(struct file *filp, struct address_space *mapping,
			   struct list_head *pages, unsigned nr_pages)
{
	int ret, err = -EIO;
	struct inode *inode = mapping->host;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	loff_t start;
	struct page *last;

	/*
	 * Use the nonblocking flag for the dlm code to avoid page
	 * lock inversion, but don't bother with retrying.
	 */
	ret = ocfs2_inode_lock_full(inode, NULL, 0, OCFS2_LOCK_NONBLOCK);
	if (ret)
		return err;

	if (down_read_trylock(&oi->ip_alloc_sem) == 0) {
		ocfs2_inode_unlock(inode, 0);
		return err;
	}

	/*
	 * Don't bother with inline-data. There isn't anything
	 * to read-ahead in that case anyway...
	 */
	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
		goto out_unlock;

	/*
	 * Check whether a remote node truncated this file - we just
	 * drop out in that case as it's not worth handling here.
	 */
	last = lru_to_page(pages);
	start = (loff_t)last->index << PAGE_SHIFT;
	if (start >= i_size_read(inode))
		goto out_unlock;

	err = mpage_readpages(mapping, pages, nr_pages, ocfs2_get_block);

out_unlock:
	up_read(&oi->ip_alloc_sem);
	ocfs2_inode_unlock(inode, 0);

	return err;
}

/* Note: Because we don't support holes, our allocation has
 * already happened (allocation writes zeros to the file data)
 * so we don't have to worry about ordered writes in
 * ocfs2_writepage.
 *
 * ->writepage is called during the process of invalidating the page cache
 * during blocked lock processing. It can't block on any cluster locks
 * during block mapping. It's relying on the fact that the block
 * mapping can't have disappeared under the dirty pages that it is
 * being asked to write back.
 */
static int ocfs2_writepage(struct page *page, struct writeback_control *wbc)
{
	trace_ocfs2_writepage(
		(unsigned long long)OCFS2_I(page->mapping->host)->ip_blkno,
		page->index);

	return block_write_full_page(page, ocfs2_get_block, wbc);
}

/* Taken from ext3. We don't necessarily need the full blown
 * functionality yet, but IMHO it's better to cut and paste the whole
 * thing so we can avoid introducing our own bugs (and easily pick up
 * their fixes when they happen) --Mark */
int walk_page_buffers(	handle_t *handle,
			struct buffer_head *head,
			unsigned from,
			unsigned to,
			int *partial,
			int (*fn)(	handle_t *handle,
					struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (	bh = head, block_start = 0;
		ret == 0 && (bh != head || !block_start);
		block_start = block_end, bh = next)
	{
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}

	return ret;
}

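/*
 * Usage sketch (illustrative, not from the original source): a caller
 * holding a locked page and a running transaction could add every
 * buffer in the byte range [from, to) to the transaction with
 * something like
 *
 *	static int do_journal_access(handle_t *handle,
 *				     struct buffer_head *bh)
 *	{
 *		return ocfs2_journal_access(handle, INODE_CACHE(inode),
 *					    bh, OCFS2_JOURNAL_ACCESS_WRITE);
 *	}
 *
 *	walk_page_buffers(handle, page_buffers(page), from, to,
 *			  NULL, do_journal_access);
 *
 * passing a non-NULL 'partial' when it needs to know whether any
 * buffer outside the range is not uptodate. The helper name here is
 * hypothetical; only walk_page_buffers() and ocfs2_journal_access()
 * are from this code base.
 */
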
static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block)
{
	sector_t status;
	u64 p_blkno = 0;
	int err = 0;
	struct inode *inode = mapping->host;

	trace_ocfs2_bmap((unsigned long long)OCFS2_I(inode)->ip_blkno,
			 (unsigned long long)block);

	/*
	 * The swap code (ab-)uses ->bmap to get a block mapping and then
	 * bypasses the file system for actual I/O. We really can't allow
	 * that on refcounted inodes, so we have to skip out here. And yes,
	 * 0 is the magic code for a bmap error..
	 */
	if (ocfs2_is_refcount_inode(inode))
		return 0;

	/* We don't need to lock journal system files, since they aren't
	 * accessed concurrently from multiple nodes.
	 */
	if (!INODE_JOURNAL(inode)) {
		err = ocfs2_inode_lock(inode, NULL, 0);
		if (err) {
			if (err != -ENOENT)
				mlog_errno(err);
			goto bail;
		}
		down_read(&OCFS2_I(inode)->ip_alloc_sem);
	}

	if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
		err = ocfs2_extent_map_get_blocks(inode, block, &p_blkno, NULL,
						  NULL);

	if (!INODE_JOURNAL(inode)) {
		up_read(&OCFS2_I(inode)->ip_alloc_sem);
		ocfs2_inode_unlock(inode, 0);
	}

	if (err) {
		mlog(ML_ERROR, "get_blocks() failed, block = %llu\n",
		     (unsigned long long)block);
		mlog_errno(err);
	}

bail:
	status = err ? 0 : p_blkno;

	return status;
}

static int ocfs2_releasepage(struct page *page, gfp_t wait)
{
	if (!page_has_buffers(page))
		return 0;
	return try_to_free_buffers(page);
}

static void ocfs2_figure_cluster_boundaries(struct ocfs2_super *osb,
					    u32 cpos,
					    unsigned int *start,
					    unsigned int *end)
{
	unsigned int cluster_start = 0, cluster_end = PAGE_SIZE;

	if (unlikely(PAGE_SHIFT > osb->s_clustersize_bits)) {
		unsigned int cpp;

		cpp = 1 << (PAGE_SHIFT - osb->s_clustersize_bits);

		cluster_start = cpos % cpp;
		cluster_start = cluster_start << osb->s_clustersize_bits;

		cluster_end = cluster_start + osb->s_clustersize;
	}

	BUG_ON(cluster_start > PAGE_SIZE);
	BUG_ON(cluster_end > PAGE_SIZE);

	if (start)
		*start = cluster_start;
	if (end)
		*end = cluster_end;
}

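/*
 * Worked example (illustrative, not from the original source): on a
 * configuration with 64K pages (PAGE_SHIFT == 16) and 4K clusters
 * (s_clustersize_bits == 12), cpp = 1 << (16 - 12) = 16 clusters per
 * page. For cpos == 18: cluster_start = (18 % 16) << 12 = 8192 and
 * cluster_end = 8192 + 4096 = 12288, i.e. the third 4K window within
 * the page. When pages are no larger than a cluster, the defaults
 * (0, PAGE_SIZE) cover the whole page.
 */
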
/*
 * 'from' and 'to' are the region in the page to avoid zeroing.
 *
 * If pagesize > clustersize, this function will avoid zeroing outside
 * of the cluster boundary.
 *
 * from == to == 0 is code for "zero the entire cluster region"
 */
static void ocfs2_clear_page_regions(struct page *page,
				     struct ocfs2_super *osb, u32 cpos,
				     unsigned from, unsigned to)
{
	void *kaddr;
	unsigned int cluster_start, cluster_end;

	ocfs2_figure_cluster_boundaries(osb, cpos, &cluster_start, &cluster_end);

	kaddr = kmap_atomic(page);

	if (from || to) {
		if (from > cluster_start)
			memset(kaddr + cluster_start, 0, from - cluster_start);
		if (to < cluster_end)
			memset(kaddr + to, 0, cluster_end - to);
	} else {
		memset(kaddr + cluster_start, 0, cluster_end - cluster_start);
	}

	kunmap_atomic(kaddr);
}

/*
 * Nonsparse file systems fully allocate before we get to the write
 * code. This prevents ocfs2_write() from tagging the write as an
 * allocating one, which means ocfs2_map_page_blocks() might try to
 * read-in the blocks at the tail of our file. Avoid reading them by
 * testing i_size against each block offset.
 */
static int ocfs2_should_read_blk(struct inode *inode, struct page *page,
				 unsigned int block_start)
{
	u64 offset = page_offset(page) + block_start;

	if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
		return 1;

	if (i_size_read(inode) > offset)
		return 1;

	return 0;
}

/*
 * Some of this taken from __block_write_begin(). We already have our
 * mapping by now though, and the entire write will be allocating or
 * it won't, so not much need to use BH_New.
 *
 * This will also skip zeroing, which is handled externally.
 */
int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno,
			  struct inode *inode, unsigned int from,
			  unsigned int to, int new)
{
	int ret = 0;
	struct buffer_head *head, *bh, *wait[2], **wait_bh = wait;
	unsigned int block_end, block_start;
	unsigned int bsize = i_blocksize(inode);

	if (!page_has_buffers(page))
		create_empty_buffers(page, bsize, 0);

	head = page_buffers(page);
	for (bh = head, block_start = 0; bh != head || !block_start;
	     bh = bh->b_this_page, block_start += bsize) {
		block_end = block_start + bsize;

		clear_buffer_new(bh);

		/*
		 * Ignore blocks outside of our i/o range -
		 * they may belong to unallocated clusters.
		 */
		if (block_start >= to || block_end <= from) {
			if (PageUptodate(page))
				set_buffer_uptodate(bh);
			continue;
		}

		/*
		 * For an allocating write with cluster size >= page
		 * size, we always write the entire page.
		 */
		if (new)
			set_buffer_new(bh);

		if (!buffer_mapped(bh)) {
			map_bh(bh, inode->i_sb, *p_blkno);
			clean_bdev_bh_alias(bh);
		}

		if (PageUptodate(page)) {
			if (!buffer_uptodate(bh))
				set_buffer_uptodate(bh);
		} else if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
			   !buffer_new(bh) &&
			   ocfs2_should_read_blk(inode, page, block_start) &&
			   (block_start < from || block_end > to)) {
			ll_rw_block(REQ_OP_READ, 0, 1, &bh);
			*wait_bh++ = bh;
		}

		*p_blkno = *p_blkno + 1;
	}

	/*
	 * If we issued read requests - let them complete.
	 */
	while(wait_bh > wait) {
		wait_on_buffer(*--wait_bh);
		if (!buffer_uptodate(*wait_bh))
			ret = -EIO;
	}

	if (ret == 0 || !new)
		return ret;

	/*
	 * If we get -EIO above, zero out any newly allocated blocks
	 * to avoid exposing stale data.
	 */
	bh = head;
	block_start = 0;
	do {
		block_end = block_start + bsize;
		if (block_end <= from)
			goto next_bh;
		if (block_start >= to)
			break;

		zero_user(page, block_start, bh->b_size);
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);

next_bh:
		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);

	return ret;
}

#if (PAGE_SIZE >= OCFS2_MAX_CLUSTERSIZE)
#define OCFS2_MAX_CTXT_PAGES	1
#else
#define OCFS2_MAX_CTXT_PAGES	(OCFS2_MAX_CLUSTERSIZE / PAGE_SIZE)
#endif

#define OCFS2_MAX_CLUSTERS_PER_PAGE	(PAGE_SIZE / OCFS2_MIN_CLUSTERSIZE)

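/*
 * Illustrative arithmetic (assuming the usual limits from ocfs2_fs.h,
 * OCFS2_MIN_CLUSTERSIZE == 4K and OCFS2_MAX_CLUSTERSIZE == 1M): with
 * 4K pages, OCFS2_MAX_CTXT_PAGES = 1M / 4K = 256 pages per write
 * context and OCFS2_MAX_CLUSTERS_PER_PAGE = 4K / 4K = 1. The #if
 * branch that pins OCFS2_MAX_CTXT_PAGES to 1 only triggers when a page
 * is at least as large as the biggest possible cluster.
 */
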
struct ocfs2_unwritten_extent {
	struct list_head	ue_node;
	struct list_head	ue_ip_node;
	u32			ue_cpos;
	u32			ue_phys;
};

/*
 * Describe the state of a single cluster to be written to.
 */
struct ocfs2_write_cluster_desc {
	u32		c_cpos;
	u32		c_phys;

	/*
	 * Give this a unique field because c_phys eventually gets
	 * filled.
	 */
	unsigned	c_new;
	unsigned	c_clear_unwritten;
	unsigned	c_needs_zero;
};

struct ocfs2_write_ctxt {
	/* Logical cluster position / len of write */
	u32				w_cpos;
	u32				w_clen;

	/* First cluster allocated in a nonsparse extend */
	u32				w_first_new_cpos;

	/* Type of caller. Must be one of buffer, mmap, direct. */
	ocfs2_write_type_t		w_type;

	struct ocfs2_write_cluster_desc	w_desc[OCFS2_MAX_CLUSTERS_PER_PAGE];

	/*
	 * This is true if page_size > cluster_size.
	 *
	 * It triggers a set of special cases during write which might
	 * have to deal with allocating writes to partial pages.
	 */
	unsigned int			w_large_pages;

	/*
	 * Pages involved in this write.
	 *
	 * w_target_page is the page being written to by the user.
	 *
	 * w_pages is an array of pages which always contains
	 * w_target_page, and in the case of an allocating write with
	 * page_size < cluster size, it will contain zero'd and mapped
	 * pages adjacent to w_target_page which need to be written
	 * out so that future reads from that region will get zeros.
	 */
	unsigned int			w_num_pages;
	struct page			*w_pages[OCFS2_MAX_CTXT_PAGES];
	struct page			*w_target_page;

	/*
	 * w_target_locked is used by the page_mkwrite path to indicate
	 * that w_target_page must not be unlocked in
	 * ocfs2_write_end_nolock().
	 */
	unsigned int			w_target_locked:1;

	/*
	 * ocfs2_write_end() uses this to know what the real range to
	 * write in the target should be.
	 */
	unsigned int			w_target_from;
	unsigned int			w_target_to;

	/*
	 * We could use journal_current_handle() but this is cleaner.
	 */
	handle_t			*w_handle;

	struct buffer_head		*w_di_bh;

	struct ocfs2_cached_dealloc_ctxt w_dealloc;

	struct list_head		w_unwritten_list;
	unsigned int			w_unwritten_count;
};

void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages)
{
	int i;

	for(i = 0; i < num_pages; i++) {
		if (pages[i]) {
			unlock_page(pages[i]);
			mark_page_accessed(pages[i]);
			put_page(pages[i]);
		}
	}
}

static void ocfs2_unlock_pages(struct ocfs2_write_ctxt *wc)
{
	int i;

	/*
	 * w_target_locked is only set to true in the page_mkwrite() case.
	 * The intent is to allow us to lock the target page from write_begin()
	 * to write_end(). The caller must hold a ref on w_target_page.
	 */
	if (wc->w_target_locked) {
		BUG_ON(!wc->w_target_page);
		for (i = 0; i < wc->w_num_pages; i++) {
			if (wc->w_target_page == wc->w_pages[i]) {
				wc->w_pages[i] = NULL;
				break;
			}
		}
		mark_page_accessed(wc->w_target_page);
		put_page(wc->w_target_page);
	}
	ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages);
}

static void ocfs2_free_unwritten_list(struct inode *inode,
				      struct list_head *head)
{
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_unwritten_extent *ue = NULL, *tmp = NULL;

	list_for_each_entry_safe(ue, tmp, head, ue_node) {
		list_del(&ue->ue_node);
		spin_lock(&oi->ip_lock);
		list_del(&ue->ue_ip_node);
		spin_unlock(&oi->ip_lock);
		kfree(ue);
	}
}

static void ocfs2_free_write_ctxt(struct inode *inode,
				  struct ocfs2_write_ctxt *wc)
{
	ocfs2_free_unwritten_list(inode, &wc->w_unwritten_list);
	ocfs2_unlock_pages(wc);
	brelse(wc->w_di_bh);
	kfree(wc);
}

static int ocfs2_alloc_write_ctxt(struct ocfs2_write_ctxt **wcp,
				  struct ocfs2_super *osb, loff_t pos,
				  unsigned len, ocfs2_write_type_t type,
				  struct buffer_head *di_bh)
{
	u32 cend;
	struct ocfs2_write_ctxt *wc;

	wc = kzalloc(sizeof(struct ocfs2_write_ctxt), GFP_NOFS);
	if (!wc)
		return -ENOMEM;

	wc->w_cpos = pos >> osb->s_clustersize_bits;
	wc->w_first_new_cpos = UINT_MAX;
	cend = (pos + len - 1) >> osb->s_clustersize_bits;
	wc->w_clen = cend - wc->w_cpos + 1;
	get_bh(di_bh);
	wc->w_di_bh = di_bh;
	wc->w_type = type;

	if (unlikely(PAGE_SHIFT > osb->s_clustersize_bits))
		wc->w_large_pages = 1;
	else
		wc->w_large_pages = 0;

	ocfs2_init_dealloc_ctxt(&wc->w_dealloc);
	INIT_LIST_HEAD(&wc->w_unwritten_list);

	*wcp = wc;

	return 0;
}

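/*
 * Worked example (illustrative, not from the original source): with 4K
 * clusters (s_clustersize_bits == 12), a write of len == 3000 bytes at
 * pos == 10000 gives w_cpos = 10000 >> 12 = 2 and
 * cend = (10000 + 3000 - 1) >> 12 = 3, so w_clen = 3 - 2 + 1 = 2: the
 * write straddles the boundary between clusters 2 and 3.
 */
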
/*
 * If a page has any new buffers, zero them out here, and mark them uptodate
 * and dirty so they'll be written out (in order to prevent uninitialised
 * block data from leaking). And clear the new bit.
 */
static void ocfs2_zero_new_buffers(struct page *page, unsigned from, unsigned to)
{
	unsigned int block_start, block_end;
	struct buffer_head *head, *bh;

	BUG_ON(!PageLocked(page));
	if (!page_has_buffers(page))
		return;

	bh = head = page_buffers(page);
	block_start = 0;
	do {
		block_end = block_start + bh->b_size;

		if (buffer_new(bh)) {
			if (block_end > from && block_start < to) {
				if (!PageUptodate(page)) {
					unsigned start, end;

					start = max(from, block_start);
					end = min(to, block_end);

					zero_user_segment(page, start, end);
					set_buffer_uptodate(bh);
				}

				clear_buffer_new(bh);
				mark_buffer_dirty(bh);
			}
		}

		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);
}

/*
 * Only called when we have a failure during allocating write to write
 * zeros to the newly allocated region.
 */
static void ocfs2_write_failure(struct inode *inode,
				struct ocfs2_write_ctxt *wc,
				loff_t user_pos, unsigned user_len)
{
	int i;
	unsigned from = user_pos & (PAGE_SIZE - 1),
		to = user_pos + user_len;
	struct page *tmppage;

	if (wc->w_target_page)
		ocfs2_zero_new_buffers(wc->w_target_page, from, to);

	for(i = 0; i < wc->w_num_pages; i++) {
		tmppage = wc->w_pages[i];

		if (tmppage && page_has_buffers(tmppage)) {
			if (ocfs2_should_order_data(inode))
				ocfs2_jbd2_file_inode(wc->w_handle, inode);

			block_commit_write(tmppage, from, to);
		}
	}
}

static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno,
					struct ocfs2_write_ctxt *wc,
					struct page *page, u32 cpos,
					loff_t user_pos, unsigned user_len,
					int new)
{
	int ret;
	unsigned int map_from = 0, map_to = 0;
	unsigned int cluster_start, cluster_end;
	unsigned int user_data_from = 0, user_data_to = 0;

	ocfs2_figure_cluster_boundaries(OCFS2_SB(inode->i_sb), cpos,
					&cluster_start, &cluster_end);

	/* treat the write as new if a hole/lseek spanned across
	 * the page boundary.
	 */
	new = new | ((i_size_read(inode) <= page_offset(page)) &&
			(page_offset(page) <= user_pos));

	if (page == wc->w_target_page) {
		map_from = user_pos & (PAGE_SIZE - 1);
		map_to = map_from + user_len;

		if (new)
			ret = ocfs2_map_page_blocks(page, p_blkno, inode,
						    cluster_start, cluster_end,
						    new);
		else
			ret = ocfs2_map_page_blocks(page, p_blkno, inode,
						    map_from, map_to, new);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		user_data_from = map_from;
		user_data_to = map_to;
		if (new) {
			map_from = cluster_start;
			map_to = cluster_end;
		}
	} else {
		/*
		 * If we haven't allocated the new page yet, we
		 * shouldn't be writing it out without copying user
		 * data. This is likely a math error from the caller.
		 */
		BUG_ON(!new);

		map_from = cluster_start;
		map_to = cluster_end;

		ret = ocfs2_map_page_blocks(page, p_blkno, inode,
					    cluster_start, cluster_end, new);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	/*
	 * Parts of newly allocated pages need to be zero'd.
	 *
	 * Above, we have also rewritten 'to' and 'from' - as far as
	 * the rest of the function is concerned, the entire cluster
	 * range inside of a page needs to be written.
	 *
	 * We can skip this if the page is up to date - it's already
	 * been zero'd from being read in as a hole.
	 */
	if (new && !PageUptodate(page))
		ocfs2_clear_page_regions(page, OCFS2_SB(inode->i_sb),
					 cpos, user_data_from, user_data_to);

	flush_dcache_page(page);

out:
	return ret;
}

/*
 * This function will only grab one clusters worth of pages.
 */
static int ocfs2_grab_pages_for_write(struct address_space *mapping,
				      struct ocfs2_write_ctxt *wc,
				      u32 cpos, loff_t user_pos,
				      unsigned user_len, int new,
				      struct page *mmap_page)
{
	int ret = 0, i;
	unsigned long start, target_index, end_index, index;
	struct inode *inode = mapping->host;
	loff_t last_byte;

	target_index = user_pos >> PAGE_SHIFT;

	/*
	 * Figure out how many pages we'll be manipulating here. For
	 * non allocating write, we just change the one
	 * page. Otherwise, we'll need a whole clusters worth. If we're
	 * writing past i_size, we only need enough pages to cover the
	 * last page of the write.
	 */
	if (new) {
		wc->w_num_pages = ocfs2_pages_per_cluster(inode->i_sb);
		start = ocfs2_align_clusters_to_page_index(inode->i_sb, cpos);
		/*
		 * We need the index *past* the last page we could possibly
		 * touch. This is the page past the end of the write or
		 * i_size, whichever is greater.
		 */
		last_byte = max(user_pos + user_len, i_size_read(inode));
		BUG_ON(last_byte < 1);
		end_index = ((last_byte - 1) >> PAGE_SHIFT) + 1;
		if ((start + wc->w_num_pages) > end_index)
			wc->w_num_pages = end_index - start;
	} else {
		wc->w_num_pages = 1;
		start = target_index;
	}
	end_index = (user_pos + user_len - 1) >> PAGE_SHIFT;

	for(i = 0; i < wc->w_num_pages; i++) {
		index = start + i;

		if (index >= target_index && index <= end_index &&
		    wc->w_type == OCFS2_WRITE_MMAP) {
			/*
			 * ocfs2_pagemkwrite() is a little different
			 * and wants us to directly use the page
			 * passed in.
			 */
			lock_page(mmap_page);

			/* Exit and let the caller retry */
			if (mmap_page->mapping != mapping) {
				WARN_ON(mmap_page->mapping);
				unlock_page(mmap_page);
				ret = -EAGAIN;
				goto out;
			}

			get_page(mmap_page);
			wc->w_pages[i] = mmap_page;
			wc->w_target_locked = true;
		} else if (index >= target_index && index <= end_index &&
			   wc->w_type == OCFS2_WRITE_DIRECT) {
			/* Direct write has no mapping page. */
			wc->w_pages[i] = NULL;
			continue;
		} else {
			wc->w_pages[i] = find_or_create_page(mapping, index,
							     GFP_NOFS);
			if (!wc->w_pages[i]) {
				ret = -ENOMEM;
				mlog_errno(ret);
				goto out;
			}
		}
		wait_for_stable_page(wc->w_pages[i]);

		if (index == target_index)
			wc->w_target_page = wc->w_pages[i];
	}
out:
	if (ret)
		wc->w_target_locked = false;
	return ret;
}

/*
 * Prepare a single cluster for write.
 */
static int ocfs2_write_cluster(struct address_space *mapping,
			       u32 *phys, unsigned int new,
			       unsigned int clear_unwritten,
			       unsigned int should_zero,
			       struct ocfs2_alloc_context *data_ac,
			       struct ocfs2_alloc_context *meta_ac,
			       struct ocfs2_write_ctxt *wc, u32 cpos,
			       loff_t user_pos, unsigned user_len)
{
	int ret, i;
	u64 p_blkno;
	struct inode *inode = mapping->host;
	struct ocfs2_extent_tree et;
	int bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1);

	if (new) {
		u32 tmp_pos;
		/*
		 * This is safe to call with the page locks - it won't take
		 * any additional semaphores or cluster locks.
		 */
		tmp_pos = cpos;
		ret = ocfs2_add_inode_data(OCFS2_SB(inode->i_sb), inode,
					   &tmp_pos, 1, !clear_unwritten,
					   wc->w_di_bh, wc->w_handle,
					   data_ac, meta_ac, NULL);
		/*
		 * This shouldn't happen because we must have already
		 * calculated the correct meta data allocation required. The
		 * internal tree allocation code should know how to increase
		 * transaction credits itself.
		 *
		 * If need be, we could handle -EAGAIN for a
		 * RESTART_TRANS here.
		 */
		mlog_bug_on_msg(ret == -EAGAIN,
				"Inode %llu: EAGAIN return during allocation.\n",
				(unsigned long long)OCFS2_I(inode)->ip_blkno);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
	} else if (clear_unwritten) {
		ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode),
					      wc->w_di_bh);
		ret = ocfs2_mark_extent_written(inode, &et,
						wc->w_handle, cpos, 1, *phys,
						meta_ac, &wc->w_dealloc);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
	}

	/*
	 * The only reason this should fail is due to an inability to
	 * find the extent added.
	 */
	ret = ocfs2_get_clusters(inode, cpos, phys, NULL, NULL);
	if (ret < 0) {
		mlog(ML_ERROR, "Get physical blkno failed for inode %llu, "
		     "at logical cluster %u",
		     (unsigned long long)OCFS2_I(inode)->ip_blkno, cpos);
		goto out;
	}

	BUG_ON(*phys == 0);

	p_blkno = ocfs2_clusters_to_blocks(inode->i_sb, *phys);
	if (!should_zero)
		p_blkno += (user_pos >> inode->i_sb->s_blocksize_bits) & (u64)(bpc - 1);

	for(i = 0; i < wc->w_num_pages; i++) {
		int tmpret;

		/* This is the direct io target page. */
		if (wc->w_pages[i] == NULL) {
			p_blkno++;
			continue;
		}

		tmpret = ocfs2_prepare_page_for_write(inode, &p_blkno, wc,
						      wc->w_pages[i], cpos,
						      user_pos, user_len,
						      should_zero);
		if (tmpret) {
			mlog_errno(tmpret);
			if (ret == 0)
				ret = tmpret;
		}
	}

	/*
	 * We only have cleanup to do in case of allocating write.
	 */
	if (ret && new)
		ocfs2_write_failure(inode, wc, user_pos, user_len);

out:
	return ret;
}

static int ocfs2_write_cluster_by_desc(struct address_space *mapping,
				       struct ocfs2_alloc_context *data_ac,
				       struct ocfs2_alloc_context *meta_ac,
				       struct ocfs2_write_ctxt *wc,
				       loff_t pos, unsigned len)
{
	int ret, i;
	loff_t cluster_off;
	unsigned int local_len = len;
	struct ocfs2_write_cluster_desc *desc;
	struct ocfs2_super *osb = OCFS2_SB(mapping->host->i_sb);

	for (i = 0; i < wc->w_clen; i++) {
		desc = &wc->w_desc[i];

		/*
		 * We have to make sure that the total write passed in
		 * doesn't extend past a single cluster.
		 */
		local_len = len;
		cluster_off = pos & (osb->s_clustersize - 1);
		if ((cluster_off + local_len) > osb->s_clustersize)
			local_len = osb->s_clustersize - cluster_off;

		ret = ocfs2_write_cluster(mapping, &desc->c_phys,
					  desc->c_new,
					  desc->c_clear_unwritten,
					  desc->c_needs_zero,
					  data_ac, meta_ac,
					  wc, desc->c_cpos, pos, local_len);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		len -= local_len;
		pos += local_len;
	}

	ret = 0;
out:
	return ret;
}

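/*
 * Worked example (illustrative, not from the original source): with 4K
 * clusters, a write of len == 4000 at pos == 6000 is split by the loop
 * above into two ocfs2_write_cluster() calls. First pass:
 * cluster_off = 6000 & 4095 = 1904, so local_len is trimmed to
 * 4096 - 1904 = 2192 bytes, finishing cluster 1. Second pass: pos has
 * advanced to 8192 (start of cluster 2), cluster_off = 0, and the
 * remaining 1808 bytes are written there.
 */
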
/*
 * ocfs2_write_end() wants to know which parts of the target page it
 * should complete the write on. It's easiest to compute them ahead of
 * time when a more complete view of the write is available.
 */
static void ocfs2_set_target_boundaries(struct ocfs2_super *osb,
					struct ocfs2_write_ctxt *wc,
					loff_t pos, unsigned len, int alloc)
{
	struct ocfs2_write_cluster_desc *desc;

	wc->w_target_from = pos & (PAGE_SIZE - 1);
	wc->w_target_to = wc->w_target_from + len;

	if (alloc == 0)
		return;

	/*
	 * Allocating write - we may have different boundaries based
	 * on page size and cluster size.
	 *
	 * NOTE: We can no longer compute one value from the other as
	 * the actual write length and user provided length may be
	 * different.
	 */

	if (wc->w_large_pages) {
		/*
		 * We only care about the 1st and last cluster within
		 * our range and whether they should be zero'd or not. Either
		 * value may be extended out to the start/end of a
		 * newly allocated cluster.
		 */
		desc = &wc->w_desc[0];
		if (desc->c_needs_zero)
			ocfs2_figure_cluster_boundaries(osb,
							desc->c_cpos,
							&wc->w_target_from,
							NULL);

		desc = &wc->w_desc[wc->w_clen - 1];
		if (desc->c_needs_zero)
			ocfs2_figure_cluster_boundaries(osb,
							desc->c_cpos,
							NULL,
							&wc->w_target_to);
	} else {
		wc->w_target_from = 0;
		wc->w_target_to = PAGE_SIZE;
	}
}

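/*
 * Worked example (illustrative, not from the original source): a
 * buffered write of len == 100 at pos == 5000 on 4K pages starts with
 * w_target_from = 5000 & 4095 = 904 and w_target_to = 1004. If the
 * first/last cluster under the write was freshly allocated
 * (c_needs_zero set) and w_large_pages is true, the calls above widen
 * that window out to the enclosing cluster boundaries so the whole
 * newly allocated region gets written back; with pages no larger than
 * a cluster, the entire page is used instead.
 */
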
/*
 * Check if this extent is marked UNWRITTEN by direct io. If so, we don't
 * need to do the zeroing work, and should not clear UNWRITTEN since it
 * will be cleared by the direct io procedure.
 * If this is a new extent allocated by direct io, we should mark it in
 * the ip_unwritten_list.
 */
static int ocfs2_unwritten_check(struct inode *inode,
				 struct ocfs2_write_ctxt *wc,
				 struct ocfs2_write_cluster_desc *desc)
{
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_unwritten_extent *ue = NULL, *new = NULL;
	int ret = 0;

	if (!desc->c_needs_zero)
		return 0;

retry:
	spin_lock(&oi->ip_lock);
	/* No need to zero, whether buffered or direct: whoever put the
	 * cluster on the list is doing the zeroing, and will clear
	 * UNWRITTEN after all cluster io has finished. */
	list_for_each_entry(ue, &oi->ip_unwritten_list, ue_ip_node) {
		if (desc->c_cpos == ue->ue_cpos) {
			BUG_ON(desc->c_new);
			desc->c_needs_zero = 0;
			desc->c_clear_unwritten = 0;
			goto unlock;
		}
	}

	if (wc->w_type != OCFS2_WRITE_DIRECT)
		goto unlock;

	if (new == NULL) {
		spin_unlock(&oi->ip_lock);
		new = kmalloc(sizeof(struct ocfs2_unwritten_extent),
			      GFP_NOFS);
		if (new == NULL) {
			ret = -ENOMEM;
			goto out;
		}
		goto retry;
	}
	/* This direct write will do the zeroing. */
	new->ue_cpos = desc->c_cpos;
	new->ue_phys = desc->c_phys;
	desc->c_clear_unwritten = 0;
	list_add_tail(&new->ue_ip_node, &oi->ip_unwritten_list);
	list_add_tail(&new->ue_node, &wc->w_unwritten_list);
	wc->w_unwritten_count++;
	new = NULL;
unlock:
	spin_unlock(&oi->ip_lock);
out:
	kfree(new);
	return ret;
}

/*
 * Populate each single-cluster write descriptor in the write context
 * with information about the i/o to be done.
 *
 * Returns the number of clusters that will have to be allocated, as
 * well as a worst case estimate of the number of extent records that
 * would have to be created during a write to an unwritten region.
 */
static int ocfs2_populate_write_desc(struct inode *inode,
				     struct ocfs2_write_ctxt *wc,
				     unsigned int *clusters_to_alloc,
				     unsigned int *extents_to_split)
{
	int ret;
	struct ocfs2_write_cluster_desc *desc;
	unsigned int num_clusters = 0;
	unsigned int ext_flags = 0;
	u32 phys = 0;
	int i;

	*clusters_to_alloc = 0;
	*extents_to_split = 0;

	for (i = 0; i < wc->w_clen; i++) {
		desc = &wc->w_desc[i];
		desc->c_cpos = wc->w_cpos + i;

		if (num_clusters == 0) {
			/*
			 * Need to look up the next extent record.
			 */
			ret = ocfs2_get_clusters(inode, desc->c_cpos, &phys,
						 &num_clusters, &ext_flags);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}

			/* We should already CoW the refcounted extent. */
			BUG_ON(ext_flags & OCFS2_EXT_REFCOUNTED);

			/*
			 * Assume worst case - that we're writing in
			 * the middle of the extent.
			 *
			 * We can assume that the write proceeds from
			 * left to right, in which case the extent
			 * insert code is smart enough to coalesce the
			 * next splits into the previous records created.
			 */
			if (ext_flags & OCFS2_EXT_UNWRITTEN)
				*extents_to_split = *extents_to_split + 2;
		} else if (phys) {
			/*
			 * Only increment phys if it doesn't describe
			 * a hole.
			 */
			phys++;
		}

		/*
		 * If w_first_new_cpos is < UINT_MAX, we have a non-sparse
		 * file that got extended. w_first_new_cpos tells us
		 * where the newly allocated clusters are so we can
		 * zero them.
		 */
		if (desc->c_cpos >= wc->w_first_new_cpos) {
			BUG_ON(phys == 0);
			desc->c_needs_zero = 1;
		}

		desc->c_phys = phys;
		if (phys == 0) {
			desc->c_new = 1;
			desc->c_needs_zero = 1;
			desc->c_clear_unwritten = 1;
			*clusters_to_alloc = *clusters_to_alloc + 1;
		}

		if (ext_flags & OCFS2_EXT_UNWRITTEN) {
			desc->c_clear_unwritten = 1;
			desc->c_needs_zero = 1;
		}

		ret = ocfs2_unwritten_check(inode, wc, desc);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		num_clusters--;
	}

	ret = 0;
out:
	return ret;
}

static int ocfs2_write_begin_inline(struct address_space *mapping,
				    struct inode *inode,
				    struct ocfs2_write_ctxt *wc)
{
	int ret;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct page *page;
	handle_t *handle;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	page = find_or_create_page(mapping, 0, GFP_NOFS);
	if (!page) {
		ocfs2_commit_trans(osb, handle);
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}
	/*
	 * If we don't set w_num_pages then this page won't get unlocked
	 * and freed on cleanup of the write context.
	 */
	wc->w_pages[0] = wc->w_target_page = page;
	wc->w_num_pages = 1;

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		ocfs2_commit_trans(osb, handle);
		mlog_errno(ret);
		goto out;
	}

	if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
		ocfs2_set_inode_data_inline(inode, di);

	if (!PageUptodate(page)) {
		ret = ocfs2_read_inline_data(inode, page, wc->w_di_bh);
		if (ret) {
			ocfs2_commit_trans(osb, handle);
			goto out;
		}
	}

	wc->w_handle = handle;
out:
	return ret;
}

int ocfs2_size_fits_inline_data(struct buffer_head *di_bh, u64 new_size)
{
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;

	if (new_size <= le16_to_cpu(di->id2.i_data.id_count))
		return 1;
	return 0;
}

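/*
 * Example (illustrative, not from the original source): id_count is the
 * byte capacity of the inline data area, which depends on block size
 * and any inline xattrs stealing space from it; on a 4K block it is a
 * bit under 4K. A 2000 byte file would fit inline, while a 5000 byte
 * one would not and would need conversion to an extent list.
 */
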
static int ocfs2_try_to_write_inline_data(struct address_space *mapping,
					  struct inode *inode, loff_t pos,
					  unsigned len, struct page *mmap_page,
					  struct ocfs2_write_ctxt *wc)
{
	int ret, written = 0;
	loff_t end = pos + len;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_dinode *di = NULL;

	trace_ocfs2_try_to_write_inline_data((unsigned long long)oi->ip_blkno,
					     len, (unsigned long long)pos,
					     oi->ip_dyn_features);

	/*
	 * Handle inodes which already have inline data 1st.
	 */
	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		if (mmap_page == NULL &&
		    ocfs2_size_fits_inline_data(wc->w_di_bh, end))
			goto do_inline_write;

		/*
		 * The write won't fit - we have to give this inode an
		 * extent list now.
		 */
		ret = ocfs2_convert_inline_data_to_extents(inode, wc->w_di_bh);
		if (ret)
			mlog_errno(ret);
		goto out;
	}

	/*
	 * Check whether the inode can accept inline data.
	 */
	if (oi->ip_clusters != 0 || i_size_read(inode) != 0)
		return 0;

	/*
	 * Check whether the write can fit.
	 */
	di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
	if (mmap_page ||
	    end > ocfs2_max_inline_data_with_xattr(inode->i_sb, di))
		return 0;

do_inline_write:
	ret = ocfs2_write_begin_inline(mapping, inode, wc);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * This signals to the caller that the data can be written
	 * inline.
	 */
	written = 1;
out:
	return written ? written : ret;
}

/*
 * This function only does anything for file systems which can't
 * handle sparse files.
 *
 * What we want to do here is fill in any hole between the current end
 * of allocation and the end of our write. That way the rest of the
 * write path can treat it as a non-allocating write, which has no
 * special case code for sparse/nonsparse files.
 */
static int ocfs2_expand_nonsparse_inode(struct inode *inode,
					struct buffer_head *di_bh,
					loff_t pos, unsigned len,
					struct ocfs2_write_ctxt *wc)
{
	int ret;
	loff_t newsize = pos + len;

	BUG_ON(ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)));

	if (newsize <= i_size_read(inode))
		return 0;

	ret = ocfs2_extend_no_holes(inode, di_bh, newsize, pos);
	if (ret)
		mlog_errno(ret);

	/* There is no wc if this is called from direct io. */
	if (wc)
		wc->w_first_new_cpos =
			ocfs2_clusters_for_bytes(inode->i_sb, i_size_read(inode));

	return ret;
}

static int ocfs2_zero_tail(struct inode *inode, struct buffer_head *di_bh,
			   loff_t pos)
{
	int ret = 0;

	BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)));
	if (pos > i_size_read(inode))
		ret = ocfs2_zero_extend(inode, di_bh, pos);

	return ret;
}

int ocfs2_write_begin_nolock(struct address_space *mapping,
			     loff_t pos, unsigned len, ocfs2_write_type_t type,
			     struct page **pagep, void **fsdata,
			     struct buffer_head *di_bh, struct page *mmap_page)
{
	int ret, cluster_of_pages, credits = OCFS2_INODE_UPDATE_CREDITS;
	unsigned int clusters_to_alloc, extents_to_split, clusters_need = 0;
	struct ocfs2_write_ctxt *wc;
	struct inode *inode = mapping->host;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_dinode *di;
	struct ocfs2_alloc_context *data_ac = NULL;
	struct ocfs2_alloc_context *meta_ac = NULL;
	handle_t *handle;
	struct ocfs2_extent_tree et;
	int try_free = 1, ret1;

try_again:
	ret = ocfs2_alloc_write_ctxt(&wc, osb, pos, len, type, di_bh);
	if (ret) {
		mlog_errno(ret);
		return ret;
	}

	if (ocfs2_supports_inline_data(osb)) {
		ret = ocfs2_try_to_write_inline_data(mapping, inode, pos, len,
						     mmap_page, wc);
		if (ret == 1) {
			ret = 0;
			goto success;
		}
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
	}

	/* Direct io changes i_size late, so don't zero the tail here. */
	if (type != OCFS2_WRITE_DIRECT) {
		if (ocfs2_sparse_alloc(osb))
			ret = ocfs2_zero_tail(inode, di_bh, pos);
		else
			ret = ocfs2_expand_nonsparse_inode(inode, di_bh, pos,
							   len, wc);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	ret = ocfs2_check_range_for_refcount(inode, pos, len);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	} else if (ret == 1) {
		clusters_need = wc->w_clen;
		ret = ocfs2_refcount_cow(inode, di_bh,
					 wc->w_cpos, wc->w_clen, UINT_MAX);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	ret = ocfs2_populate_write_desc(inode, wc, &clusters_to_alloc,
					&extents_to_split);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}
	clusters_need += clusters_to_alloc;

	di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;

	trace_ocfs2_write_begin_nolock(
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			(long long)i_size_read(inode),
			le32_to_cpu(di->i_clusters),
			pos, len, type, mmap_page,
			clusters_to_alloc, extents_to_split);

	/*
	 * We set w_target_from, w_target_to here so that
	 * ocfs2_write_end() knows which range in the target page to
	 * write out. An allocation requires that we write the entire
	 * cluster range.
	 */
	if (clusters_to_alloc || extents_to_split) {
		/*
		 * XXX: We are stretching the limits of
		 * ocfs2_lock_allocators(). It greatly over-estimates
		 * the work to be done.
		 */
		ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode),
					      wc->w_di_bh);
		ret = ocfs2_lock_allocators(inode, &et,
					    clusters_to_alloc, extents_to_split,
					    &data_ac, &meta_ac);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		if (data_ac)
			data_ac->ac_resv = &OCFS2_I(inode)->ip_la_data_resv;

		credits = ocfs2_calc_extend_credits(inode->i_sb,
						    &di->id2.i_list);
	} else if (type == OCFS2_WRITE_DIRECT)
		/* direct write needs no transaction if no extents were allocated. */
		goto success;

	/*
	 * We have to zero sparse allocated clusters, unwritten extent clusters,
	 * and non-sparse clusters we just extended. For non-sparse writes,
	 * we know zeros will only be needed in the first and/or last cluster.
	 */
	if (wc->w_clen && (wc->w_desc[0].c_needs_zero ||
			   wc->w_desc[wc->w_clen - 1].c_needs_zero))
		cluster_of_pages = 1;
	else
		cluster_of_pages = 0;

	ocfs2_set_target_boundaries(osb, wc, pos, len, cluster_of_pages);

	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	wc->w_handle = handle;

	if (clusters_to_alloc) {
		ret = dquot_alloc_space_nodirty(inode,
			ocfs2_clusters_to_bytes(osb->sb, clusters_to_alloc));
		if (ret)
			goto out_commit;
	}

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_quota;
	}

	/*
	 * Fill our page array first. That way we've grabbed enough so
	 * that we can zero and flush if we error after adding the
	 * extent.
	 */
	ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos, len,
					 cluster_of_pages, mmap_page);
	if (ret && ret != -EAGAIN) {
		mlog_errno(ret);
		goto out_quota;
	}

	/*
	 * ocfs2_grab_pages_for_write() returns -EAGAIN if it could not lock
	 * the target page. In this case, we exit with no error and no target
	 * page. This will trigger the caller, page_mkwrite(), to re-try
	 * the operation.
	 */
	if (ret == -EAGAIN) {
		BUG_ON(wc->w_target_page);
		ret = 0;
		goto out_quota;
	}

	ret = ocfs2_write_cluster_by_desc(mapping, data_ac, meta_ac, wc, pos,
					  len);
	if (ret) {
		mlog_errno(ret);
		goto out_quota;
	}

	if (data_ac)
		ocfs2_free_alloc_context(data_ac);
	if (meta_ac)
		ocfs2_free_alloc_context(meta_ac);

success:
	if (pagep)
		*pagep = wc->w_target_page;
	*fsdata = wc;
	return 0;
out_quota:
	if (clusters_to_alloc)
		dquot_free_space(inode,
			  ocfs2_clusters_to_bytes(osb->sb, clusters_to_alloc));
out_commit:
	ocfs2_commit_trans(osb, handle);
out:
	/*
	 * The mmapped page won't be unlocked in ocfs2_free_write_ctxt(),
	 * even in case of error here like ENOSPC and ENOMEM. So, we need
	 * to unlock the target page manually to prevent deadlocks when
	 * retrying again on ENOSPC, or when returning non-VM_FAULT_LOCKED
	 * to VM code.
	 */
	if (wc->w_target_locked)
		unlock_page(mmap_page);

	ocfs2_free_write_ctxt(inode, wc);

	if (data_ac) {
		ocfs2_free_alloc_context(data_ac);
		data_ac = NULL;
	}
	if (meta_ac) {
		ocfs2_free_alloc_context(meta_ac);
		meta_ac = NULL;
	}

	if (ret == -ENOSPC && try_free) {
		/*
		 * Try to free some truncate log so that we can have enough
		 * clusters to allocate.
		 */
		try_free = 0;

		ret1 = ocfs2_try_to_free_truncate_log(osb, clusters_need);
		if (ret1 == 1)
			goto try_again;

		if (ret1 < 0)
			mlog_errno(ret1);
	}

	return ret;
}

static int ocfs2_write_begin(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned flags,
			     struct page **pagep, void **fsdata)
{
	int ret;
	struct buffer_head *di_bh = NULL;
	struct inode *inode = mapping->host;

	ret = ocfs2_inode_lock(inode, &di_bh, 1);
	if (ret) {
		mlog_errno(ret);
		return ret;
	}

	/*
	 * Take alloc sem here to prevent concurrent lookups. That way
	 * the mapping, zeroing and tree manipulation within
	 * ocfs2_write() will be safe against ->readpage(). This
	 * should also serve to lock out allocation from a shared
	 * writeable region.
	 */
	down_write(&OCFS2_I(inode)->ip_alloc_sem);

	ret = ocfs2_write_begin_nolock(mapping, pos, len, OCFS2_WRITE_BUFFER,
				       pagep, fsdata, di_bh, NULL);
	if (ret) {
		mlog_errno(ret);
		goto out_fail;
	}

	brelse(di_bh);

	return 0;

out_fail:
	up_write(&OCFS2_I(inode)->ip_alloc_sem);

	brelse(di_bh);
	ocfs2_inode_unlock(inode, 1);

	return ret;
}

static void ocfs2_write_end_inline(struct inode *inode, loff_t pos,
				   unsigned len, unsigned *copied,
				   struct ocfs2_dinode *di,
				   struct ocfs2_write_ctxt *wc)
{
	void *kaddr;

	if (unlikely(*copied < len)) {
		if (!PageUptodate(wc->w_target_page)) {
			*copied = 0;
			goto out;
		}
	}

	kaddr = kmap_atomic(wc->w_target_page);
	memcpy(di->id2.i_data.id_data + pos, kaddr + pos, *copied);
	kunmap_atomic(kaddr);

out:
	trace_ocfs2_write_end_inline(
	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
	     (unsigned long long)pos, *copied,
	     le16_to_cpu(di->id2.i_data.id_count),
	     le16_to_cpu(di->i_dyn_features));
}

int ocfs2_write_end_nolock(struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied, void *fsdata)
{
	int i, ret;
	unsigned from, to, start = pos & (PAGE_SIZE - 1);
	struct inode *inode = mapping->host;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_write_ctxt *wc = fsdata;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
	handle_t *handle = wc->w_handle;
	struct page *tmppage;

	BUG_ON(!list_empty(&wc->w_unwritten_list));

	if (handle) {
		ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode),
				wc->w_di_bh, OCFS2_JOURNAL_ACCESS_WRITE);
		if (ret) {
			copied = ret;
			mlog_errno(ret);
			goto out;
		}
	}

	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		ocfs2_write_end_inline(inode, pos, len, &copied, di, wc);
		goto out_write_size;
	}

	if (unlikely(copied < len) && wc->w_target_page) {
		if (!PageUptodate(wc->w_target_page))
			copied = 0;

		ocfs2_zero_new_buffers(wc->w_target_page, start+copied,
				       start+len);
	}
	if (wc->w_target_page)
		flush_dcache_page(wc->w_target_page);

	for(i = 0; i < wc->w_num_pages; i++) {
		tmppage = wc->w_pages[i];

		/* This is the direct io target page. */
		if (tmppage == NULL)
			continue;

		if (tmppage == wc->w_target_page) {
			from = wc->w_target_from;
			to = wc->w_target_to;

			BUG_ON(from > PAGE_SIZE ||
			       to > PAGE_SIZE ||
			       to < from);
		} else {
			/*
			 * Pages adjacent to the target (if any) imply
			 * a hole-filling write in which case we want
			 * to flush their entire range.
			 */
			from = 0;
			to = PAGE_SIZE;
		}

		if (page_has_buffers(tmppage)) {
			if (handle && ocfs2_should_order_data(inode))
				ocfs2_jbd2_file_inode(handle, inode);
			block_commit_write(tmppage, from, to);
		}
	}

out_write_size:
	/* Direct io does not update i_size here. */
	if (wc->w_type != OCFS2_WRITE_DIRECT) {
		pos += copied;
		if (pos > i_size_read(inode)) {
			i_size_write(inode, pos);
			mark_inode_dirty(inode);
		}
		inode->i_blocks = ocfs2_inode_sector_count(inode);
		di->i_size = cpu_to_le64((u64)i_size_read(inode));
		inode->i_mtime = inode->i_ctime = current_time(inode);
		di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
		di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
		ocfs2_update_inode_fsync_trans(handle, inode, 1);
	}
	if (handle)
		ocfs2_journal_dirty(handle, wc->w_di_bh);

out:
	/* unlock pages before dealloc since it needs acquiring the
	 * j_trans_barrier lock, or it will cause a deadlock since the journal
	 * commit thread holds this lock and will ask for the page lock when
	 * flushing the data. Keep it here to preserve the unlock order.
	 */
	ocfs2_unlock_pages(wc);

	if (handle)
		ocfs2_commit_trans(osb, handle);

	ocfs2_run_deallocs(osb, &wc->w_dealloc);

	brelse(wc->w_di_bh);
	kfree(wc);

	return copied;
}

static int ocfs2_write_end(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
{
	int ret;
	struct inode *inode = mapping->host;

	ret = ocfs2_write_end_nolock(mapping, pos, len, copied, fsdata);

	up_write(&OCFS2_I(inode)->ip_alloc_sem);
	ocfs2_inode_unlock(inode, 1);

	return ret;
}

struct ocfs2_dio_write_ctxt {
	struct list_head	dw_zero_list;
	unsigned		dw_zero_count;
	int			dw_orphaned;
	pid_t			dw_writer_pid;
};

static struct ocfs2_dio_write_ctxt *
ocfs2_dio_alloc_write_ctx(struct buffer_head *bh, int *alloc)
{
	struct ocfs2_dio_write_ctxt *dwc = NULL;

	if (bh->b_private)
		return bh->b_private;

	dwc = kmalloc(sizeof(struct ocfs2_dio_write_ctxt), GFP_NOFS);
	if (dwc == NULL)
		return NULL;
	INIT_LIST_HEAD(&dwc->dw_zero_list);
	dwc->dw_zero_count = 0;
	dwc->dw_orphaned = 0;
	dwc->dw_writer_pid = task_pid_nr(current);
	bh->b_private = dwc;
	*alloc = 1;

	return dwc;
}

static void ocfs2_dio_free_write_ctx(struct inode *inode,
				     struct ocfs2_dio_write_ctxt *dwc)
{
	ocfs2_free_unwritten_list(inode, &dwc->dw_zero_list);
	kfree(dwc);
}

/*
 * TODO: Make this into a generic get_blocks function.
 *
 * From do_direct_io in direct-io.c:
 *  "So what we do is to permit the ->get_blocks function to populate
 *   bh.b_size with the size of IO which is permitted at this offset and
 *   this i_blkbits."
 *
 * This function is called directly from get_more_blocks in direct-io.c.
 *
 * called like this: dio->get_blocks(dio->inode, fs_startblk,
 * 					fs_count, map_bh, dio->rw == WRITE);
 */
static int ocfs2_dio_wr_get_block(struct inode *inode, sector_t iblock,
				  struct buffer_head *bh_result, int create)
{
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_write_ctxt *wc;
	struct ocfs2_write_cluster_desc *desc = NULL;
	struct ocfs2_dio_write_ctxt *dwc = NULL;
	struct buffer_head *di_bh = NULL;
	u64 p_blkno;
	loff_t pos = iblock << inode->i_sb->s_blocksize_bits;
	unsigned len, total_len = bh_result->b_size;
	int ret = 0, first_get_block = 0;

	len = osb->s_clustersize - (pos & (osb->s_clustersize - 1));
	len = min(total_len, len);

	mlog(0, "get block of %lu at %llu:%u req %u\n",
	     inode->i_ino, pos, len, total_len);

	/*
	 * Because ocfs2_dio_end_io_write() may need to change the file size,
	 * or add the inode to the orphan dir, we cannot take the fast path
	 * when the file size will change.
	 */
	if (pos + total_len <= i_size_read(inode)) {

		/* This is the fast path for re-write. */
		ret = ocfs2_lock_get_block(inode, iblock, bh_result, create);
		if (buffer_mapped(bh_result) &&
		    !buffer_new(bh_result) &&
		    ret == 0)
			goto out;

		/* Clear state set by ocfs2_get_block. */
		bh_result->b_state = 0;
	}

	dwc = ocfs2_dio_alloc_write_ctx(bh_result, &first_get_block);
	if (unlikely(dwc == NULL)) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	if (ocfs2_clusters_for_bytes(inode->i_sb, pos + total_len) >
	    ocfs2_clusters_for_bytes(inode->i_sb, i_size_read(inode)) &&
	    !dwc->dw_orphaned) {
		/*
		 * When we are going to allocate extents beyond the file size,
		 * add the inode to the orphan dir, so we can reclaim that
		 * space if the system crashes during the write.
		 */
		ret = ocfs2_add_inode_to_orphan(osb, inode);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
		dwc->dw_orphaned = 1;
	}

	ret = ocfs2_inode_lock(inode, &di_bh, 1);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	down_write(&oi->ip_alloc_sem);

	if (first_get_block) {
		if (ocfs2_sparse_alloc(osb))
			ret = ocfs2_zero_tail(inode, di_bh, pos);
		else
			ret = ocfs2_expand_nonsparse_inode(inode, di_bh, pos,
							   total_len, NULL);
		if (ret < 0) {
			mlog_errno(ret);
			goto unlock;
		}
	}

	ret = ocfs2_write_begin_nolock(inode->i_mapping, pos, len,
				       OCFS2_WRITE_DIRECT, NULL,
				       (void **)&wc, di_bh, NULL);
	if (ret) {
		mlog_errno(ret);
		goto unlock;
	}

	desc = &wc->w_desc[0];

	p_blkno = ocfs2_clusters_to_blocks(inode->i_sb, desc->c_phys);
	BUG_ON(p_blkno == 0);
	p_blkno += iblock & (u64)(ocfs2_clusters_to_blocks(inode->i_sb, 1) - 1);

	map_bh(bh_result, inode->i_sb, p_blkno);
	bh_result->b_size = len;
	if (desc->c_needs_zero)
		set_buffer_new(bh_result);

	/* end_io may sleep, which must not happen in irq context, so defer
	 * completion to the dio work queue. */
	set_buffer_defer_completion(bh_result);

	if (!list_empty(&wc->w_unwritten_list)) {
		struct ocfs2_unwritten_extent *ue = NULL;

		ue = list_first_entry(&wc->w_unwritten_list,
				      struct ocfs2_unwritten_extent,
				      ue_node);
		BUG_ON(ue->ue_cpos != desc->c_cpos);
		/* The physical address may be 0, fill it. */
		ue->ue_phys = desc->c_phys;

		list_splice_tail_init(&wc->w_unwritten_list, &dwc->dw_zero_list);
		dwc->dw_zero_count += wc->w_unwritten_count;
	}

	ret = ocfs2_write_end_nolock(inode->i_mapping, pos, len, len, wc);
	BUG_ON(ret != len);
	ret = 0;
unlock:
	up_write(&oi->ip_alloc_sem);
	ocfs2_inode_unlock(inode, 1);
	brelse(di_bh);
out:
	if (ret < 0)
		ret = -EIO;
	return ret;
}

static int ocfs2_dio_end_io_write(struct inode *inode,
				  struct ocfs2_dio_write_ctxt *dwc,
				  loff_t offset,
				  ssize_t bytes)
{
	struct ocfs2_cached_dealloc_ctxt dealloc;
	struct ocfs2_extent_tree et;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_unwritten_extent *ue = NULL;
	struct buffer_head *di_bh = NULL;
	struct ocfs2_dinode *di;
	struct ocfs2_alloc_context *data_ac = NULL;
	struct ocfs2_alloc_context *meta_ac = NULL;
	handle_t *handle = NULL;
	loff_t end = offset + bytes;
	int ret = 0, credits = 0, locked = 0;

	ocfs2_init_dealloc_ctxt(&dealloc);

	/* Here we clear unwritten extents, delete the inode from the orphan
	 * dir, and change i_size. If none of these is needed, we can skip
	 * all this. */
	if (list_empty(&dwc->dw_zero_list) &&
	    end <= i_size_read(inode) &&
	    !dwc->dw_orphaned)
		goto out;

	/* ocfs2_file_write_iter will get i_mutex, so we need not lock if we
	 * are in that context. */
	if (dwc->dw_writer_pid != task_pid_nr(current)) {
		inode_lock(inode);
		locked = 1;
	}

	ret = ocfs2_inode_lock(inode, &di_bh, 1);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	down_write(&oi->ip_alloc_sem);

	/* Delete from the orphan dir before acquiring i_mutex. */
	if (dwc->dw_orphaned) {
		BUG_ON(dwc->dw_writer_pid != task_pid_nr(current));

		end = end > i_size_read(inode) ? end : 0;

		ret = ocfs2_del_inode_from_orphan(osb, inode, di_bh,
				!!end, end);
		if (ret < 0)
			mlog_errno(ret);
	}

	di = (struct ocfs2_dinode *)di_bh->b_data;

	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh);

	/* Attach dealloc to the extent tree in case we may reuse extents
	 * which are already unlinked from the current extent tree due to
	 * extent rotation and merging.
	 */
	et.et_dealloc = &dealloc;

	ret = ocfs2_lock_allocators(inode, &et, 0, dwc->dw_zero_count*2,
				    &data_ac, &meta_ac);
	if (ret) {
		mlog_errno(ret);
		goto unlock;
	}

	credits = ocfs2_calc_extend_credits(inode->i_sb, &di->id2.i_list);

	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto unlock;
	}
	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto commit;
	}

	list_for_each_entry(ue, &dwc->dw_zero_list, ue_node) {
		ret = ocfs2_mark_extent_written(inode, &et, handle,
						ue->ue_cpos, 1,
						ue->ue_phys,
						meta_ac, &dealloc);
		if (ret < 0) {
			mlog_errno(ret);
			break;
		}
	}

	if (end > i_size_read(inode)) {
		ret = ocfs2_set_inode_size(handle, inode, di_bh, end);
		if (ret < 0)
			mlog_errno(ret);
	}
commit:
	ocfs2_commit_trans(osb, handle);
unlock:
	up_write(&oi->ip_alloc_sem);
	ocfs2_inode_unlock(inode, 1);
	brelse(di_bh);
out:
	if (data_ac)
		ocfs2_free_alloc_context(data_ac);
	if (meta_ac)
		ocfs2_free_alloc_context(meta_ac);
	ocfs2_run_deallocs(osb, &dealloc);
	if (locked)
		inode_unlock(inode);
	ocfs2_dio_free_write_ctx(inode, dwc);

	return ret;
}

/*
 * ocfs2_dio_end_io is called by the dio core when a dio is finished. We're
 * particularly interested in the aio/dio case. We use the rw_lock DLM lock
 * to protect io on one node from truncation on another.
 */
static int ocfs2_dio_end_io(struct kiocb *iocb,
			    loff_t offset,
			    ssize_t bytes,
			    void *private)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	int level;
	int ret = 0;

	/* this io's submitter should not have unlocked this before we could */
	BUG_ON(!ocfs2_iocb_is_rw_locked(iocb));

	if (bytes <= 0)
		mlog_ratelimited(ML_ERROR, "Direct IO failed, bytes = %lld",
				 (long long)bytes);
	if (private) {
		if (bytes > 0)
			ret = ocfs2_dio_end_io_write(inode, private, offset,
						     bytes);
		else
			ocfs2_dio_free_write_ctx(inode, private);
	}

	ocfs2_iocb_clear_rw_locked(iocb);

	level = ocfs2_iocb_rw_locked_level(iocb);
	ocfs2_rw_unlock(inode, level);
	return ret;
}

static ssize_t ocfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	get_block_t *get_block;

	/*
	 * Fallback to buffered I/O if we see an inode without
	 * extents.
	 */
	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
		return 0;

	/* Fallback to buffered I/O if we do not support append dio. */
	if (iocb->ki_pos + iter->count > i_size_read(inode) &&
	    !ocfs2_supports_append_dio(osb))
		return 0;

	if (iov_iter_rw(iter) == READ)
		get_block = ocfs2_lock_get_block;
	else
		get_block = ocfs2_dio_wr_get_block;

	return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
				    iter, get_block,
				    ocfs2_dio_end_io, NULL, 0);
}

const struct address_space_operations ocfs2_aops = {
	.readpage		= ocfs2_readpage,
	.readpages		= ocfs2_readpages,
	.writepage		= ocfs2_writepage,
	.write_begin		= ocfs2_write_begin,
	.write_end		= ocfs2_write_end,
	.bmap			= ocfs2_bmap,
	.direct_IO		= ocfs2_direct_IO,
	.invalidatepage		= block_invalidatepage,
	.releasepage		= ocfs2_releasepage,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};