// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/file.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/uuid.h>
#include <linux/file.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"
#include "gc.h"
#include "trace.h"
#include <trace/events/f2fs.h>
static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	vm_fault_t ret;

	down_read(&F2FS_I(inode)->i_mmap_sem);
	ret = filemap_fault(vmf);
	up_read(&F2FS_I(inode)->i_mmap_sem);

	trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret);
	return ret;
}
static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn = { .node_changed = false };
	int err;

	if (unlikely(f2fs_cp_error(sbi))) {
		err = -EIO;
		goto err;
	}

	err = f2fs_is_checkpoint_ready(sbi);
	if (err)
		goto err;

	sb_start_pagefault(inode->i_sb);

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	file_update_time(vmf->vma->vm_file);
	down_read(&F2FS_I(inode)->i_mmap_sem);
	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {
		unlock_page(page);
		err = -EFAULT;
		goto out_sem;
	}

	/* block allocation */
	__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_block(&dn, page->index);
	f2fs_put_dnode(&dn);
	__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
	if (err) {
		unlock_page(page);
		goto out_sem;
	}

	/* fill the page */
	f2fs_wait_on_page_writeback(page, DATA, false, true);

	/* wait for GCed page writeback via META_MAPPING */
	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	/* check to see if the page is mapped already (no holes) */
	if (PageMappedToDisk(page))
		goto out_sem;

	/* page is wholly or partially inside EOF */
	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
						i_size_read(inode)) {
		loff_t offset;

		offset = i_size_read(inode) & ~PAGE_MASK;
		zero_user_segment(page, offset, PAGE_SIZE);
	}
	set_page_dirty(page);
	if (!PageUptodate(page))
		SetPageUptodate(page);

	f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);
	f2fs_update_time(sbi, REQ_TIME);

	trace_f2fs_vm_page_mkwrite(page, DATA);
out_sem:
	up_read(&F2FS_I(inode)->i_mmap_sem);

	f2fs_balance_fs(sbi, dn.node_changed);

	sb_end_pagefault(inode->i_sb);
err:
	return block_page_mkwrite_return(err);
}
static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= f2fs_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
};

static int get_parent_ino(struct inode *inode, nid_t *pino)
{
	struct dentry *dentry;

	inode = igrab(inode);
	dentry = d_find_any_alias(inode);
	iput(inode);
	if (!dentry)
		return 0;

	*pino = parent_ino(dentry);
	dput(dentry);
	return 1;
}
static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	enum cp_reason_type cp_reason = CP_NO_NEEDED;

	if (!S_ISREG(inode->i_mode))
		cp_reason = CP_NON_REGULAR;
	else if (inode->i_nlink != 1)
		cp_reason = CP_HARDLINK;
	else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
		cp_reason = CP_SB_NEED_CP;
	else if (file_wrong_pino(inode))
		cp_reason = CP_WRONG_PINO;
	else if (!f2fs_space_for_roll_forward(sbi))
		cp_reason = CP_NO_SPC_ROLL;
	else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		cp_reason = CP_NODE_NEED_CP;
	else if (test_opt(sbi, FASTBOOT))
		cp_reason = CP_FASTBOOT_MODE;
	else if (F2FS_OPTION(sbi).active_logs == 2)
		cp_reason = CP_SPEC_LOG_NUM;
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
		f2fs_need_dentry_mark(sbi, inode->i_ino) &&
		f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
							TRANS_DIR_INO))
		cp_reason = CP_RECOVER_DIR;

	return cp_reason;
}
static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
	bool ret = false;

	/* But we need to avoid that there are some inode updates */
	if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino))
		ret = true;
	f2fs_put_page(i, 0);
	return ret;
}

static void try_to_fix_pino(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t pino;

	down_write(&fi->i_sem);
	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
			get_parent_ino(inode, &pino)) {
		f2fs_i_pino_write(inode, pino);
		file_got_pino(inode);
	}
	up_write(&fi->i_sem);
}
static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
						int datasync, bool atomic)
{
	struct inode *inode = file->f_mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	int ret = 0;
	enum cp_reason_type cp_reason = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};
	unsigned int seq_id = 0;

	if (unlikely(f2fs_readonly(inode->i_sb) ||
				is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return 0;

	trace_f2fs_sync_file_enter(inode);

	if (S_ISDIR(inode->i_mode))
		goto go_write;

	/* if fdatasync is triggered, let's do in-place-update */
	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
		set_inode_flag(inode, FI_NEED_IPU);
	ret = file_write_and_wait_range(file, start, end);
	clear_inode_flag(inode, FI_NEED_IPU);

	if (ret) {
		trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
		return ret;
	}

	/* if the inode is dirty, let's recover all the time */
	if (!f2fs_skip_inode_update(inode, datasync)) {
		f2fs_write_inode(inode, NULL);
		goto go_write;
	}

	/*
	 * if there is no written data, don't waste time writing recovery info.
	 */
	if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
			!f2fs_exist_written_data(sbi, ino, APPEND_INO)) {

		/* it may call write_inode just prior to fsync */
		if (need_inode_page_update(sbi, ino))
			goto go_write;

		if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
				f2fs_exist_written_data(sbi, ino, UPDATE_INO))
			goto flush_out;
		goto out;
	}
go_write:
	/*
	 * Both of fdatasync() and fsync() are able to be recovered from
	 * sudden-power-off.
	 */
	down_read(&F2FS_I(inode)->i_sem);
	cp_reason = need_do_checkpoint(inode);
	up_read(&F2FS_I(inode)->i_sem);

	if (cp_reason) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);

		/*
		 * We've secured consistency through sync_fs. Following pino
		 * will be used only for fsynced inodes after checkpoint.
		 */
		try_to_fix_pino(inode);
		clear_inode_flag(inode, FI_APPEND_WRITE);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		goto out;
	}
sync_nodes:
	atomic_inc(&sbi->wb_sync_req[NODE]);
	ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
	atomic_dec(&sbi->wb_sync_req[NODE]);
	if (ret)
		goto out;

	/* if cp_error was enabled, we should avoid infinite loop */
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto out;
	}

	if (f2fs_need_inode_block_update(sbi, ino)) {
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_write_inode(inode, NULL);
		goto sync_nodes;
	}

	/*
	 * If it's atomic_write, it's just fine to keep write ordering. So
	 * here we don't need to wait for node write completion, since we use
	 * node chain which serializes node blocks. If one of node writes are
	 * reordered, we can see simply broken chain, resulting in stopping
	 * roll-forward recovery. It means we'll recover all or none node blocks
	 * given fsync mark.
	 */
	if (!atomic) {
		ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
		if (ret)
			goto out;
	}

	/* once recovery info is written, don't need to track this */
	f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
	clear_inode_flag(inode, FI_APPEND_WRITE);
flush_out:
	if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
		ret = f2fs_issue_flush(sbi, inode->i_ino);
	if (!ret) {
		f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		f2fs_remove_ino_entry(sbi, ino, FLUSH_INO);
	}
	f2fs_update_time(sbi, REQ_TIME);
out:
	trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
	f2fs_trace_ios(NULL, 1);
	return ret;
}
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
		return -EIO;

	return f2fs_do_sync_file(file, start, end, datasync, false);
}
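/*
 * Illustrative note (not part of the original file): f2fs_sync_file() is
 * what backs fsync(2) and fdatasync(2) on f2fs. A minimal userspace sketch
 * of the common "append, then persist" pattern that exercises the
 * roll-forward recovery path above:
 *
 *	int fd = open("/mnt/f2fs/journal", O_WRONLY | O_APPEND);
 *	write(fd, buf, len);
 *	fdatasync(fd);	// reaches f2fs_do_sync_file() with datasync == 1
 */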
static pgoff_t __get_first_dirty_index(struct address_space *mapping,
						pgoff_t pgofs, int whence)
{
	struct page *page;
	int nr_pages;

	if (whence != SEEK_DATA)
		return 0;

	/* find first dirty page index */
	nr_pages = find_get_pages_tag(mapping, &pgofs, PAGECACHE_TAG_DIRTY,
				      1, &page);
	if (!nr_pages)
		return ULONG_MAX;
	pgofs = page->index;
	put_page(page);
	return pgofs;
}

static bool __found_offset(struct f2fs_sb_info *sbi, block_t blkaddr,
				pgoff_t dirty, pgoff_t pgofs, int whence)
{
	switch (whence) {
	case SEEK_DATA:
		if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
			__is_valid_data_blkaddr(blkaddr))
			return true;
		break;
	case SEEK_HOLE:
		if (blkaddr == NULL_ADDR)
			return true;
		break;
	}
	return false;
}

static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
	pgoff_t pgofs, end_offset, dirty;
	loff_t data_ofs = offset;
	loff_t isize;
	int err = 0;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize)
		goto fail;

	/* handle inline data case */
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
		if (whence == SEEK_HOLE)
			data_ofs = isize;
		goto found;
	}

	pgofs = (pgoff_t)(offset >> PAGE_SHIFT);

	dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);

	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
		if (err && err != -ENOENT) {
			goto fail;
		} else if (err == -ENOENT) {
			/* direct node does not exist */
			if (whence == SEEK_DATA) {
				pgofs = f2fs_get_next_page_offset(&dn, pgofs);
				continue;
			} else {
				goto found;
			}
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

		/* find data/hole in dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
				data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
			block_t blkaddr;

			blkaddr = datablock_addr(dn.inode,
					dn.node_page, dn.ofs_in_node);

			if (__is_valid_data_blkaddr(blkaddr) &&
				!f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
					blkaddr, DATA_GENERIC_ENHANCE)) {
				f2fs_put_dnode(&dn);
				goto fail;
			}

			if (__found_offset(F2FS_I_SB(inode), blkaddr, dirty,
							pgofs, whence)) {
				f2fs_put_dnode(&dn);
				goto found;
			}
		}
		f2fs_put_dnode(&dn);
	}

	if (whence == SEEK_DATA)
		goto fail;
found:
	if (whence == SEEK_HOLE && data_ofs > isize)
		data_ofs = isize;
	inode_unlock(inode);
	return vfs_setpos(file, data_ofs, maxbytes);
fail:
	inode_unlock(inode);
	return -ENXIO;
}

static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset < 0)
			return -ENXIO;
		return f2fs_seek_block(file, offset, whence);
	}

	return -EINVAL;
}
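/*
 * Illustrative sketch (not part of the original file): the SEEK_DATA and
 * SEEK_HOLE cases handled by f2fs_seek_block() above let userspace walk the
 * extents of a sparse file:
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);     // first non-hole offset
 *	off_t hole = lseek(fd, data, SEEK_HOLE);  // end of that data region
 */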
static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	/* we don't need to use inline_data strictly */
	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	vma->vm_ops = &f2fs_file_vm_ops;
	return 0;
}

static int f2fs_file_open(struct inode *inode, struct file *filp)
{
	int err = fscrypt_file_open(inode, filp);

	if (err)
		return err;

	filp->f_mode |= FMODE_NOWAIT;

	return dquot_file_open(inode, filp);
}
void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_node *raw_node;
	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
	__le32 *addr;
	int base = 0;

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);

	raw_node = F2FS_NODE(dn->node_page);
	addr = blkaddr_in_node(raw_node) + base + ofs;

	for (; count > 0; count--, addr++, dn->ofs_in_node++) {
		block_t blkaddr = le32_to_cpu(*addr);

		if (blkaddr == NULL_ADDR)
			continue;

		dn->data_blkaddr = NULL_ADDR;
		f2fs_set_data_blkaddr(dn);

		if (__is_valid_data_blkaddr(blkaddr) &&
			!f2fs_is_valid_blkaddr(sbi, blkaddr,
					DATA_GENERIC_ENHANCE))
			continue;

		f2fs_invalidate_blocks(sbi, blkaddr);
		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
			clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
		nr_free++;
	}

	if (nr_free) {
		pgoff_t fofs;
		/*
		 * once we invalidate valid blkaddr in range [ofs, ofs + count],
		 * we will invalidate all blkaddr in the whole range.
		 */
		fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
							dn->inode) + ofs;
		f2fs_update_extent_cache_range(dn, fofs, 0, len);
		dec_valid_block_count(sbi, dn->inode, nr_free);
	}
	dn->ofs_in_node = ofs;

	f2fs_update_time(sbi, REQ_TIME);
	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					 dn->ofs_in_node, nr_free);
}

void f2fs_truncate_data_blocks(struct dnode_of_data *dn)
{
	f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
}
static int truncate_partial_data_page(struct inode *inode, u64 from,
								bool cache_only)
{
	loff_t offset = from & (PAGE_SIZE - 1);
	pgoff_t index = from >> PAGE_SHIFT;
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	if (!offset && !cache_only)
		return 0;

	if (cache_only) {
		page = find_lock_page(mapping, index);
		if (page && PageUptodate(page))
			goto truncate_out;
		f2fs_put_page(page, 1);
		return 0;
	}

	page = f2fs_get_lock_data_page(inode, index, true);
	if (IS_ERR(page))
		return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
truncate_out:
	f2fs_wait_on_page_writeback(page, DATA, true, true);
	zero_user(page, offset, PAGE_SIZE - offset);

	/* An encrypted inode should have a key and truncate the last page. */
	f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));
	if (!cache_only)
		set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}
int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	pgoff_t free_from;
	int count = 0, err = 0;
	struct page *ipage;
	bool truncate_page = false;

	trace_f2fs_truncate_blocks_enter(inode, from);

	free_from = (pgoff_t)F2FS_BLK_ALIGN(from);

	if (free_from >= sbi->max_file_blocks)
		goto free_partial;

	if (lock)
		f2fs_lock_op(sbi);

	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		f2fs_truncate_inline_inode(inode, ipage, from);
		f2fs_put_page(ipage, 1);
		truncate_page = true;
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
	if (err) {
		if (err == -ENOENT)
			goto free_next;
		goto out;
	}

	count = ADDRS_PER_PAGE(dn.node_page, inode);

	count -= dn.ofs_in_node;
	f2fs_bug_on(sbi, count < 0);

	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		f2fs_truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

	f2fs_put_dnode(&dn);
free_next:
	err = f2fs_truncate_inode_blocks(inode, free_from);
out:
	if (lock)
		f2fs_unlock_op(sbi);
free_partial:
	/* lastly zero out the first data page */
	if (!err)
		err = truncate_partial_data_page(inode, from, truncate_page);

	trace_f2fs_truncate_blocks_exit(inode, err);
	return err;
}
int f2fs_truncate(struct inode *inode)
{
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
		return 0;

	trace_f2fs_truncate(inode);

	if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
		f2fs_show_injection_info(FAULT_TRUNCATE);
		return -EIO;
	}

	/* we should check inline_data size */
	if (!f2fs_may_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
	if (err)
		return err;

	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return 0;
}
int f2fs_getattr(const struct path *path, struct kstat *stat,
		 u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_inode *ri;
	unsigned int flags;

	if (f2fs_has_extra_attr(inode) &&
			f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
		stat->result_mask |= STATX_BTIME;
		stat->btime.tv_sec = fi->i_crtime.tv_sec;
		stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
	}

	flags = fi->i_flags;
	if (flags & F2FS_APPEND_FL)
		stat->attributes |= STATX_ATTR_APPEND;
	if (IS_ENCRYPTED(inode))
		stat->attributes |= STATX_ATTR_ENCRYPTED;
	if (flags & F2FS_IMMUTABLE_FL)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (flags & F2FS_NODUMP_FL)
		stat->attributes |= STATX_ATTR_NODUMP;

	stat->attributes_mask |= (STATX_ATTR_APPEND |
				  STATX_ATTR_ENCRYPTED |
				  STATX_ATTR_IMMUTABLE |
				  STATX_ATTR_NODUMP);

	generic_fillattr(inode, stat);

	/* we need to show initial sectors used for inline_data/dentries */
	if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
					f2fs_has_inline_dentry(inode))
		stat->blocks += (stat->size + 511) >> 9;

	return 0;
}
#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = timespec64_trunc(attr->ia_atime,
						  inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = timespec64_trunc(attr->ia_mtime,
						  inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = timespec64_trunc(attr->ia_ctime,
						  inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
		set_acl_inode(inode, mode);
	}
}
#else
#define __setattr_copy setattr_copy
#endif
int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	err = setattr_prepare(dentry, attr);
	if (err)
		return err;

	err = fscrypt_prepare_setattr(dentry, attr);
	if (err)
		return err;

	if (is_quota_modification(inode, attr)) {
		err = dquot_initialize(inode);
		if (err)
			return err;
	}
	if ((attr->ia_valid & ATTR_UID &&
		!uid_eq(attr->ia_uid, inode->i_uid)) ||
		(attr->ia_valid & ATTR_GID &&
		!gid_eq(attr->ia_gid, inode->i_gid))) {
		f2fs_lock_op(F2FS_I_SB(inode));
		err = dquot_transfer(inode, attr);
		if (err) {
			set_sbi_flag(F2FS_I_SB(inode),
					SBI_QUOTA_NEED_REPAIR);
			f2fs_unlock_op(F2FS_I_SB(inode));
			return err;
		}
		/*
		 * update uid/gid under lock_op(), so that dquot and inode can
		 * be updated atomically.
		 */
		if (attr->ia_valid & ATTR_UID)
			inode->i_uid = attr->ia_uid;
		if (attr->ia_valid & ATTR_GID)
			inode->i_gid = attr->ia_gid;
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_unlock_op(F2FS_I_SB(inode));
	}

	if (attr->ia_valid & ATTR_SIZE) {
		bool to_smaller = (attr->ia_size <= i_size_read(inode));

		down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		down_write(&F2FS_I(inode)->i_mmap_sem);

		truncate_setsize(inode, attr->ia_size);

		if (to_smaller)
			err = f2fs_truncate(inode);
		/*
		 * do not trim all blocks after i_size if target size is
		 * larger than i_size.
		 */
		up_write(&F2FS_I(inode)->i_mmap_sem);
		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

		if (err)
			return err;

		if (!to_smaller) {
			/* should convert inline inode here */
			if (!f2fs_may_inline_data(inode)) {
				err = f2fs_convert_inline_inode(inode);
				if (err)
					return err;
			}
			inode->i_mtime = inode->i_ctime = current_time(inode);
		}

		down_write(&F2FS_I(inode)->i_sem);
		F2FS_I(inode)->last_disk_size = i_size_read(inode);
		up_write(&F2FS_I(inode)->i_sem);
	}

	__setattr_copy(inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(inode, f2fs_get_inode_mode(inode));
		if (err || is_inode_flag_set(inode, FI_ACL_MODE)) {
			inode->i_mode = F2FS_I(inode)->i_acl_mode;
			clear_inode_flag(inode, FI_ACL_MODE);
		}
	}

	/* file size may have changed here */
	f2fs_mark_inode_dirty_sync(inode, true);

	/* inode change will produce dirty node pages flushed by checkpoint */
	f2fs_balance_fs(F2FS_I_SB(inode), true);

	return err;
}
const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
	.set_acl	= f2fs_set_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.listxattr	= f2fs_listxattr,
#endif
	.fiemap		= f2fs_fiemap,
};
static int fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;

	if (!len)
		return 0;

	f2fs_balance_fs(sbi, true);

	f2fs_lock_op(sbi);
	page = f2fs_get_new_data_page(inode, NULL, index, false);
	f2fs_unlock_op(sbi);

	if (IS_ERR(page))
		return PTR_ERR(page);

	f2fs_wait_on_page_writeback(page, DATA, true, true);
	zero_user(page, start, len);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}

int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
	int err;

	while (pg_start < pg_end) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
		if (err) {
			if (err == -ENOENT) {
				pg_start = f2fs_get_next_page_offset(&dn,
								pg_start);
				continue;
			}
			return err;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);

		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);

		f2fs_truncate_data_blocks_range(&dn, count);
		f2fs_put_dnode(&dn);

		pg_start += count;
	}
	return 0;
}
static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
	int ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;
		}
		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				return ret;
		}

		if (pg_start < pg_end) {
			struct address_space *mapping = inode->i_mapping;
			loff_t blk_start, blk_end;
			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

			f2fs_balance_fs(sbi, true);

			blk_start = (loff_t)pg_start << PAGE_SHIFT;
			blk_end = (loff_t)pg_end << PAGE_SHIFT;

			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			down_write(&F2FS_I(inode)->i_mmap_sem);

			truncate_inode_pages_range(mapping, blk_start,
					blk_end - 1);

			f2fs_lock_op(sbi);
			ret = f2fs_truncate_hole(inode, pg_start, pg_end);
			f2fs_unlock_op(sbi);

			up_write(&F2FS_I(inode)->i_mmap_sem);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		}
	}

	return ret;
}
static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, pgoff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, done, i;

next_dnode:
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
	if (ret && ret != -ENOENT) {
		return ret;
	} else if (ret == -ENOENT) {
		if (dn.max_level == 0)
			return -ENOENT;
		done = min((pgoff_t)ADDRS_PER_BLOCK(inode) - dn.ofs_in_node,
									len);
		blkaddr += done;
		do_replace += done;
		goto next;
	}

	done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
							dn.ofs_in_node, len);
	for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
		*blkaddr = datablock_addr(dn.inode,
					dn.node_page, dn.ofs_in_node);

		if (__is_valid_data_blkaddr(*blkaddr) &&
			!f2fs_is_valid_blkaddr(sbi, *blkaddr,
					DATA_GENERIC_ENHANCE)) {
			f2fs_put_dnode(&dn);
			return -EFSCORRUPTED;
		}

		if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {

			if (test_opt(sbi, LFS)) {
				f2fs_put_dnode(&dn);
				return -EOPNOTSUPP;
			}

			/* do not invalidate this block address */
			f2fs_update_data_blkaddr(&dn, NULL_ADDR);
			*do_replace = 1;
		}
	}
	f2fs_put_dnode(&dn);
next:
	len -= done;
	off += done;
	if (len)
		goto next_dnode;
	return 0;
}
static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, int len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, i;

	for (i = 0; i < len; i++, do_replace++, blkaddr++) {
		if (*do_replace == 0)
			continue;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
		if (ret) {
			dec_valid_block_count(sbi, inode, 1);
			f2fs_invalidate_blocks(sbi, *blkaddr);
		} else {
			f2fs_update_data_blkaddr(&dn, *blkaddr);
		}
		f2fs_put_dnode(&dn);
	}
	return 0;
}
static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
			block_t *blkaddr, int *do_replace,
			pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
	pgoff_t i = 0;
	int ret;

	while (i < len) {
		if (blkaddr[i] == NULL_ADDR && !full) {
			i++;
			continue;
		}

		if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
			struct dnode_of_data dn;
			struct node_info ni;
			size_t new_size;
			pgoff_t ilen;

			set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
			ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
			if (ret)
				return ret;

			ret = f2fs_get_node_info(sbi, dn.nid, &ni);
			if (ret) {
				f2fs_put_dnode(&dn);
				return ret;
			}

			ilen = min((pgoff_t)
				ADDRS_PER_PAGE(dn.node_page, dst_inode) -
						dn.ofs_in_node, len - i);
			do {
				dn.data_blkaddr = datablock_addr(dn.inode,
						dn.node_page, dn.ofs_in_node);
				f2fs_truncate_data_blocks_range(&dn, 1);

				if (do_replace[i]) {
					f2fs_i_blocks_write(src_inode,
							1, false, false);
					f2fs_i_blocks_write(dst_inode,
							1, true, false);
					f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
					blkaddr[i], ni.version, true, false);

					do_replace[i] = 0;
				}
				dn.ofs_in_node++;
				i++;
				new_size = (dst + i) << PAGE_SHIFT;
				if (dst_inode->i_size < new_size)
					f2fs_i_size_write(dst_inode, new_size);
			} while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));

			f2fs_put_dnode(&dn);
		} else {
			struct page *psrc, *pdst;

			psrc = f2fs_get_lock_data_page(src_inode,
							src + i, true);
			if (IS_ERR(psrc))
				return PTR_ERR(psrc);
			pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i,
								true);
			if (IS_ERR(pdst)) {
				f2fs_put_page(psrc, 1);
				return PTR_ERR(pdst);
			}
			f2fs_copy_page(psrc, pdst);
			set_page_dirty(pdst);
			f2fs_put_page(pdst, 1);
			f2fs_put_page(psrc, 1);

			ret = f2fs_truncate_hole(src_inode,
						src + i, src + i + 1);
			if (ret)
				return ret;
			i++;
		}
	}
	return 0;
}
static int __exchange_data_block(struct inode *src_inode,
			struct inode *dst_inode, pgoff_t src, pgoff_t dst,
			pgoff_t len, bool full)
{
	block_t *src_blkaddr;
	int *do_replace;
	pgoff_t olen;
	int ret;

	while (len) {
		olen = min((pgoff_t)4 * ADDRS_PER_BLOCK(src_inode), len);

		src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
					array_size(olen, sizeof(block_t)),
					GFP_KERNEL);
		if (!src_blkaddr)
			return -ENOMEM;

		do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
					array_size(olen, sizeof(int)),
					GFP_KERNEL);
		if (!do_replace) {
			kvfree(src_blkaddr);
			return -ENOMEM;
		}

		ret = __read_out_blkaddrs(src_inode, src_blkaddr,
					do_replace, src, olen);
		if (ret)
			goto roll_back;

		ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
					do_replace, src, dst, olen, full);
		if (ret)
			goto roll_back;

		src += olen;
		dst += olen;
		len -= olen;

		kvfree(src_blkaddr);
		kvfree(do_replace);
	}
	return 0;

roll_back:
	__roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen);
	kvfree(src_blkaddr);
	kvfree(do_replace);
	return ret;
}
static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
	pgoff_t start = offset >> PAGE_SHIFT;
	pgoff_t end = (offset + len) >> PAGE_SHIFT;
	int ret;

	f2fs_balance_fs(sbi, true);

	/* avoid gc operation during block exchange */
	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);

	f2fs_lock_op(sbi);
	f2fs_drop_extent_tree(inode);
	truncate_pagecache(inode, offset);
	ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
	f2fs_unlock_op(sbi);

	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	return ret;
}
static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
	loff_t new_size;
	int ret;

	if (offset + len >= i_size_read(inode))
		return -EINVAL;

	/* collapse range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	ret = f2fs_do_collapse(inode, offset, len);
	if (ret)
		return ret;

	/* write out all moved pages, if possible */
	down_write(&F2FS_I(inode)->i_mmap_sem);
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	new_size = i_size_read(inode) - len;
	truncate_pagecache(inode, new_size);

	ret = f2fs_truncate_blocks(inode, new_size, true);
	up_write(&F2FS_I(inode)->i_mmap_sem);
	if (!ret)
		f2fs_i_size_write(inode, new_size);
	return ret;
}
static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
								pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	pgoff_t index = start;
	unsigned int ofs_in_node = dn->ofs_in_node;
	blkcnt_t count = 0;
	int ret;

	for (; index < end; index++, dn->ofs_in_node++) {
		if (datablock_addr(dn->inode, dn->node_page,
					dn->ofs_in_node) == NULL_ADDR)
			count++;
	}

	dn->ofs_in_node = ofs_in_node;
	ret = f2fs_reserve_new_blocks(dn, count);
	if (ret)
		return ret;

	dn->ofs_in_node = ofs_in_node;
	for (index = start; index < end; index++, dn->ofs_in_node++) {
		dn->data_blkaddr = datablock_addr(dn->inode,
					dn->node_page, dn->ofs_in_node);
		/*
		 * f2fs_reserve_new_blocks will not guarantee entire block
		 * allocation.
		 */
		if (dn->data_blkaddr == NULL_ADDR) {
			ret = -ENOSPC;
			break;
		}
		if (dn->data_blkaddr != NEW_ADDR) {
			f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
			dn->data_blkaddr = NEW_ADDR;
			f2fs_set_data_blkaddr(dn);
		}
	}

	f2fs_update_extent_cache_range(dn, start, 0, index - start);

	return ret;
}
static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
								int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	int ret = 0;

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;

		new_size = max_t(loff_t, new_size, offset + len);
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;

			new_size = max_t(loff_t, new_size,
					(loff_t)pg_start << PAGE_SHIFT);
		}

		for (index = pg_start; index < pg_end;) {
			struct dnode_of_data dn;
			unsigned int end_offset;
			pgoff_t end;

			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			down_write(&F2FS_I(inode)->i_mmap_sem);

			truncate_pagecache_range(inode,
				(loff_t)index << PAGE_SHIFT,
				((loff_t)pg_end << PAGE_SHIFT) - 1);

			f2fs_lock_op(sbi);

			set_new_dnode(&dn, inode, NULL, NULL, 0);
			ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
			if (ret) {
				f2fs_unlock_op(sbi);
				up_write(&F2FS_I(inode)->i_mmap_sem);
				up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
				goto out;
			}

			end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
			end = min(pg_end, end_offset - dn.ofs_in_node + index);

			ret = f2fs_do_zero_range(&dn, index, end);
			f2fs_put_dnode(&dn);

			f2fs_unlock_op(sbi);
			up_write(&F2FS_I(inode)->i_mmap_sem);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

			f2fs_balance_fs(sbi, dn.node_changed);

			if (ret)
				goto out;

			index = end;
			new_size = max_t(loff_t, new_size,
					(loff_t)index << PAGE_SHIFT);
		}

		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				goto out;

			new_size = max_t(loff_t, new_size, offset + len);
		}
	}

out:
	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);
	}
	return ret;
}
static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nr, pg_start, pg_end, delta, idx;
	loff_t new_size;
	int ret = 0;

	new_size = i_size_read(inode) + len;
	ret = inode_newsize_ok(inode, new_size);
	if (ret)
		return ret;

	if (offset >= i_size_read(inode))
		return -EINVAL;

	/* insert range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	f2fs_balance_fs(sbi, true);

	down_write(&F2FS_I(inode)->i_mmap_sem);
	ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
	up_write(&F2FS_I(inode)->i_mmap_sem);
	if (ret)
		return ret;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	pg_start = offset >> PAGE_SHIFT;
	pg_end = (offset + len) >> PAGE_SHIFT;
	delta = pg_end - pg_start;
	idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	/* avoid gc operation during block exchange */
	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);
	truncate_pagecache(inode, offset);

	while (!ret && idx > pg_start) {
		nr = idx - pg_start;
		if (nr > delta)
			nr = delta;
		idx -= nr;

		f2fs_lock_op(sbi);
		f2fs_drop_extent_tree(inode);

		ret = __exchange_data_block(inode, inode, idx,
					idx + delta, nr, false);
		f2fs_unlock_op(sbi);
	}
	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	/* write out all moved pages, if possible */
	down_write(&F2FS_I(inode)->i_mmap_sem);
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);
	up_write(&F2FS_I(inode)->i_mmap_sem);

	if (!ret)
		f2fs_i_size_write(inode, new_size);
	return ret;
}
static int expand_inode_data(struct inode *inode, loff_t offset,
					loff_t len, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
			.m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
			.m_may_create = true };
	pgoff_t pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_end;
	int err;

	err = inode_newsize_ok(inode, (len + offset));
	if (err)
		return err;

	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	f2fs_balance_fs(sbi, true);

	pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
	off_end = (offset + len) & (PAGE_SIZE - 1);

	map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT;
	map.m_len = pg_end - map.m_lblk;
	if (off_end)
		map.m_len++;

	if (f2fs_is_pinned_file(inode))
		map.m_seg_type = CURSEG_COLD_DATA;

	err = f2fs_map_blocks(inode, &map, 1, (f2fs_is_pinned_file(inode) ?
						F2FS_GET_BLOCK_PRE_DIO :
						F2FS_GET_BLOCK_PRE_AIO));
	if (err) {
		pgoff_t last_off;

		if (!map.m_len)
			return err;

		last_off = map.m_lblk + map.m_len - 1;

		/* update new size to the failed position */
		new_size = (last_off == pg_end) ? offset + len :
					(loff_t)(last_off + 1) << PAGE_SHIFT;
	} else {
		new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
	}

	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);
	}

	return err;
}
static long f2fs_fallocate(struct file *file, int mode,
				loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	long ret = 0;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;
	ret = f2fs_is_checkpoint_ready(F2FS_I_SB(inode));
	if (ret)
		return ret;

	/* f2fs only supports ->fallocate for regular files */
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (IS_ENCRYPTED(inode) &&
		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
		return -EOPNOTSUPP;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
			FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
			FALLOC_FL_INSERT_RANGE))
		return -EOPNOTSUPP;

	inode_lock(inode);

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset >= inode->i_size)
			goto out;

		ret = punch_hole(inode, offset, len);
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		ret = f2fs_collapse_range(inode, offset, len);
	} else if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = f2fs_zero_range(inode, offset, len, mode);
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		ret = f2fs_insert_range(inode, offset, len);
	} else {
		ret = expand_inode_data(inode, offset, len, mode);
	}

	if (!ret) {
		inode->i_mtime = inode->i_ctime = current_time(inode);
		f2fs_mark_inode_dirty_sync(inode, false);
		f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	}

out:
	inode_unlock(inode);

	trace_f2fs_fallocate(inode, mode, offset, len, ret);
	return ret;
}
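/*
 * Illustrative sketch (not part of the original file): the mode bits
 * dispatched above map directly to fallocate(2) flags, e.g.:
 *
 *	fallocate(fd, 0, 0, 1 << 20);			// expand_inode_data()
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE |
 *		      FALLOC_FL_KEEP_SIZE, 4096, 8192);	// punch_hole()
 *	fallocate(fd, FALLOC_FL_COLLAPSE_RANGE,
 *		      4096, 4096);			// f2fs_collapse_range()
 */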
static int f2fs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * f2fs_release_file is called on every close. So we should
	 * not drop any in-memory pages on a close issued by another process.
	 */
	if (!(filp->f_mode & FMODE_WRITE) ||
			atomic_read(&inode->i_writecount) != 1)
		return 0;

	/* any remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		set_inode_flag(inode, FI_DROP_CACHE);
		filemap_fdatawrite(inode->i_mapping);
		clear_inode_flag(inode, FI_DROP_CACHE);
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
	}
	return 0;
}
static int f2fs_file_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);

	/*
	 * If the process that opened a transaction crashes, we should roll
	 * back. Otherwise, other readers/writers can see a corrupted database
	 * until all the writers close the file. Since this should be done
	 * before dropping the file lock, it needs to happen in ->flush.
	 */
	if (f2fs_is_atomic_file(inode) &&
			F2FS_I(inode)->inmem_task == current)
		f2fs_drop_inmem_pages(inode);
	return 0;
}
static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	/* Is it quota file? Do not allow user to mess with it */
	if (IS_NOQUOTA(inode))
		return -EPERM;

	fi->i_flags = iflags | (fi->i_flags & ~mask);

	if (fi->i_flags & F2FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);
	else
		clear_inode_flag(inode, FI_PROJ_INHERIT);

	inode->i_ctime = current_time(inode);
	f2fs_set_inode_flags(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
	return 0;
}
/* FS_IOC_GETFLAGS and FS_IOC_SETFLAGS support */

/*
 * To make a new on-disk f2fs i_flag gettable via FS_IOC_GETFLAGS, add an entry
 * for it to f2fs_fsflags_map[], and add its FS_*_FL equivalent to
 * F2FS_GETTABLE_FS_FL.  To also make it settable via FS_IOC_SETFLAGS, also add
 * its FS_*_FL equivalent to F2FS_SETTABLE_FS_FL.
 */

static const struct {
	u32 iflag;
	u32 fsflag;
} f2fs_fsflags_map[] = {
	{ F2FS_SYNC_FL,		FS_SYNC_FL },
	{ F2FS_IMMUTABLE_FL,	FS_IMMUTABLE_FL },
	{ F2FS_APPEND_FL,	FS_APPEND_FL },
	{ F2FS_NODUMP_FL,	FS_NODUMP_FL },
	{ F2FS_NOATIME_FL,	FS_NOATIME_FL },
	{ F2FS_INDEX_FL,	FS_INDEX_FL },
	{ F2FS_DIRSYNC_FL,	FS_DIRSYNC_FL },
	{ F2FS_PROJINHERIT_FL,	FS_PROJINHERIT_FL },
};

#define F2FS_GETTABLE_FS_FL (		\
		FS_SYNC_FL |		\
		FS_IMMUTABLE_FL |	\
		FS_APPEND_FL |		\
		FS_NODUMP_FL |		\
		FS_NOATIME_FL |		\
		FS_INDEX_FL |		\
		FS_DIRSYNC_FL |		\
		FS_PROJINHERIT_FL |	\
		FS_ENCRYPT_FL |		\
		FS_INLINE_DATA_FL |	\
		FS_NOCOW_FL)

#define F2FS_SETTABLE_FS_FL (		\
		FS_SYNC_FL |		\
		FS_IMMUTABLE_FL |	\
		FS_APPEND_FL |		\
		FS_NODUMP_FL |		\
		FS_NOATIME_FL |		\
		FS_DIRSYNC_FL |		\
		FS_PROJINHERIT_FL)
/* Convert f2fs on-disk i_flags to FS_IOC_{GET,SET}FLAGS flags */
static inline u32 f2fs_iflags_to_fsflags(u32 iflags)
{
	u32 fsflags = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
		if (iflags & f2fs_fsflags_map[i].iflag)
			fsflags |= f2fs_fsflags_map[i].fsflag;

	return fsflags;
}

/* Convert FS_IOC_{GET,SET}FLAGS flags to f2fs on-disk i_flags */
static inline u32 f2fs_fsflags_to_iflags(u32 fsflags)
{
	u32 iflags = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
		if (fsflags & f2fs_fsflags_map[i].fsflag)
			iflags |= f2fs_fsflags_map[i].iflag;

	return iflags;
}
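/*
 * Illustrative note (not part of the original file): these helpers let the
 * generic lsattr(1)/chattr(1) flag space round-trip through f2fs's on-disk
 * i_flags. From userspace, for example:
 *
 *	int flags;
 *	ioctl(fd, FS_IOC_GETFLAGS, &flags);
 *	flags |= FS_NOATIME_FL;
 *	ioctl(fd, FS_IOC_SETFLAGS, &flags);
 */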
static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	u32 fsflags = f2fs_iflags_to_fsflags(fi->i_flags);

	if (IS_ENCRYPTED(inode))
		fsflags |= FS_ENCRYPT_FL;
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
		fsflags |= FS_INLINE_DATA_FL;
	if (is_inode_flag_set(inode, FI_PIN_FILE))
		fsflags |= FS_NOCOW_FL;

	fsflags &= F2FS_GETTABLE_FS_FL;

	return put_user(fsflags, (int __user *)arg);
}
static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	u32 fsflags, old_fsflags;
	u32 iflags;
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (get_user(fsflags, (int __user *)arg))
		return -EFAULT;

	if (fsflags & ~F2FS_GETTABLE_FS_FL)
		return -EOPNOTSUPP;
	fsflags &= F2FS_SETTABLE_FS_FL;

	iflags = f2fs_fsflags_to_iflags(fsflags);
	if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
		return -EOPNOTSUPP;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	old_fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
	ret = vfs_ioc_setflags_prepare(inode, old_fsflags, fsflags);
	if (ret)
		goto out;

	ret = f2fs_setflags_common(inode, iflags,
			f2fs_fsflags_to_iflags(F2FS_SETTABLE_FS_FL));
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	return put_user(inode->i_generation, (int __user *)arg);
}
static int f2fs_ioc_start_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (filp->f_flags & O_DIRECT)
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_atomic_file(inode)) {
		if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))
			ret = -EINVAL;
		goto out;
	}

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	/*
	 * Should wait end_io to count F2FS_WB_CP_DATA correctly by
	 * f2fs_is_atomic_file.
	 */
	if (get_dirty_pages(inode))
		f2fs_warn(F2FS_I_SB(inode), "Unexpected flush for atomic writes: ino=%lu, npages=%u",
			  inode->i_ino, get_dirty_pages(inode));
	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (ret) {
		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		goto out;
	}

	set_inode_flag(inode, FI_ATOMIC_FILE);
	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	F2FS_I(inode)->inmem_task = current;
	stat_inc_atomic_write(inode);
	stat_update_max_atomic_write(inode);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
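/*
 * Illustrative sketch (not part of the original file): how a database such
 * as SQLite on Android may drive the atomic-write ioctls; error handling
 * is omitted:
 *
 *	ioctl(fd, F2FS_IOC_START_ATOMIC_WRITE);
 *	pwrite(fd, page, pgsz, off);		  // staged as in-memory pages
 *	ioctl(fd, F2FS_IOC_COMMIT_ATOMIC_WRITE);  // all-or-nothing commit
 */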
static int f2fs_ioc_commit_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	f2fs_balance_fs(F2FS_I_SB(inode), true);

	inode_lock(inode);

	if (f2fs_is_volatile_file(inode)) {
		ret = -EINVAL;
		goto err_out;
	}

	if (f2fs_is_atomic_file(inode)) {
		ret = f2fs_commit_inmem_pages(inode);
		if (ret)
			goto err_out;

		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
		if (!ret) {
			clear_inode_flag(inode, FI_ATOMIC_FILE);
			F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC] = 0;
			stat_dec_atomic_write(inode);
		}
	} else {
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
	}
err_out:
	if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
		clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
		ret = -EINVAL;
	}
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_start_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_volatile_file(inode))
		goto out;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	stat_inc_volatile_write(inode);
	stat_update_max_volatile_write(inode);

	set_inode_flag(inode, FI_VOLATILE_FILE);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_release_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (!f2fs_is_volatile_file(inode))
		goto out;

	if (!f2fs_is_first_block_written(inode)) {
		ret = truncate_partial_data_page(inode, 0, true);
		goto out;
	}

	ret = punch_hole(inode, 0, F2FS_BLKSIZE);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_abort_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
	}

	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);

	inode_unlock(inode);

	mnt_drop_write_file(filp);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return ret;
}
static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	__u32 in;
	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(in, (__u32 __user *)arg))
		return -EFAULT;

	if (in != F2FS_GOING_DOWN_FULLSYNC) {
		ret = mnt_want_write_file(filp);
		if (ret)
			return ret;
	}

	switch (in) {
	case F2FS_GOING_DOWN_FULLSYNC:
		sb = freeze_bdev(sb->s_bdev);
		if (IS_ERR(sb)) {
			ret = PTR_ERR(sb);
			goto out;
		}
		if (sb) {
			f2fs_stop_checkpoint(sbi, false);
			set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
			thaw_bdev(sb->s_bdev, sb);
		}
		break;
	case F2FS_GOING_DOWN_METASYNC:
		/* do checkpoint only */
		ret = f2fs_sync_fs(sb, 1);
		if (ret)
			goto out;
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		break;
	case F2FS_GOING_DOWN_NOSYNC:
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		break;
	case F2FS_GOING_DOWN_METAFLUSH:
		f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		break;
	case F2FS_GOING_DOWN_NEED_FSCK:
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
		set_sbi_flag(sbi, SBI_IS_DIRTY);
		/* do checkpoint only */
		ret = f2fs_sync_fs(sb, 1);
		goto out;
	default:
		ret = -EINVAL;
		goto out;
	}

	f2fs_stop_gc_thread(sbi);
	f2fs_stop_discard_thread(sbi);

	f2fs_drop_discard_cmd(sbi);
	clear_opt(sbi, DISCARD);

	f2fs_update_time(sbi, REQ_TIME);
out:
	if (in != F2FS_GOING_DOWN_FULLSYNC)
		mnt_drop_write_file(filp);

	trace_f2fs_shutdown(sbi, in, ret);

	return ret;
}
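/*
 * Illustrative sketch (not part of the original file): the shutdown ioctl
 * is typically issued by test tools (e.g. xfstests) to simulate a sudden
 * power cut:
 *
 *	__u32 in = F2FS_GOING_DOWN_METASYNC;
 *	ioctl(fd, F2FS_IOC_SHUTDOWN, &in);
 */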
static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	struct request_queue *q = bdev_get_queue(sb->s_bdev);
	struct fstrim_range range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!f2fs_hw_support_discard(F2FS_SB(sb)))
		return -EOPNOTSUPP;

	if (copy_from_user(&range, (struct fstrim_range __user *)arg,
				sizeof(range)))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	range.minlen = max((unsigned int)range.minlen,
			   q->limits.discard_granularity);
	ret = f2fs_trim_fs(F2FS_SB(sb), &range);
	mnt_drop_write_file(filp);
	if (ret < 0)
		return ret;

	if (copy_to_user((struct fstrim_range __user *)arg, &range,
				sizeof(range)))
		return -EFAULT;
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return 0;
}
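/*
 * Illustrative sketch (not part of the original file): FITRIM is what
 * fstrim(8) uses; an equivalent direct call would be:
 *
 *	struct fstrim_range r = { .start = 0, .len = ULLONG_MAX, .minlen = 0 };
 *	ioctl(fd, FITRIM, &r);		// fd refers to any file on the fs
 */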
static bool uuid_is_nonzero(__u8 u[16])
{
	int i;

	for (i = 0; i < 16; i++)
		if (u[i])
			return true;
	return false;
}

static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode)))
		return -EOPNOTSUPP;

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
}

static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;
	return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
}
static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err;

	if (!f2fs_sb_has_encrypt(sbi))
		return -EOPNOTSUPP;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	down_write(&sbi->sb_lock);

	if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
		goto got_it;

	/* update superblock with uuid */
	generate_random_uuid(sbi->raw_super->encrypt_pw_salt);

	err = f2fs_commit_super(sbi, false);
	if (err) {
		/* undo new data */
		memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
		goto out_err;
	}
got_it:
	if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
									16))
		err = -EFAULT;
out_err:
	up_write(&sbi->sb_lock);
	mnt_drop_write_file(filp);
	return err;
}
static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	__u32 sync;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(sync, (__u32 __user *)arg))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (!sync) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		mutex_lock(&sbi->gc_mutex);
	}

	ret = f2fs_gc(sbi, sync, true, NULL_SEGNO);
out:
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_gc_range range;
	u64 end;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
							sizeof(range)))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	end = range.start + range.len;
	if (range.start < MAIN_BLKADDR(sbi) || end >= MAX_BLKADDR(sbi))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

do_more:
	if (!range.sync) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		mutex_lock(&sbi->gc_mutex);
	}

	ret = f2fs_gc(sbi, range.sync, true, GET_SEGNO(sbi, range.start));
	range.start += BLKS_PER_SEC(sbi);
	if (range.start <= end)
		goto do_more;
out:
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		f2fs_info(sbi, "Skipping Checkpoint. Checkpoints currently disabled.");
		return -EINVAL;
	}

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	ret = f2fs_sync_fs(sbi->sb, 1);

	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
					struct file *filp,
					struct f2fs_defragment *range)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_map_blocks map = { .m_next_extent = NULL,
					.m_seg_type = NO_CHECK_TYPE,
					.m_may_create = false };
	struct extent_info ei = {0, 0, 0};
	pgoff_t pg_start, pg_end, next_pgofs;
	unsigned int blk_per_seg = sbi->blocks_per_seg;
	unsigned int total = 0, sec_num;
	block_t blk_end = 0;
	bool fragmented = false;
	int err;

	/* if in-place-update policy is enabled, don't waste time here */
	if (f2fs_should_update_inplace(inode, NULL))
		return -EINVAL;

	pg_start = range->start >> PAGE_SHIFT;
	pg_end = (range->start + range->len) >> PAGE_SHIFT;

	f2fs_balance_fs(sbi, true);

	inode_lock(inode);

	/* writeback all dirty pages in the range */
	err = filemap_write_and_wait_range(inode->i_mapping, range->start,
						range->start + range->len - 1);
	if (err)
		goto out;

	/*
	 * lookup mapping info in extent cache, skip defragmenting if physical
	 * block addresses are continuous.
	 */
	if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
		if (ei.fofs + ei.len >= pg_end)
			goto out;
	}

	map.m_lblk = pg_start;
	map.m_next_pgofs = &next_pgofs;

	/*
	 * lookup mapping info in dnode page cache, skip defragmenting if all
	 * physical block addresses are continuous even if there are hole(s)
	 * in logical blocks.
	 */
	while (map.m_lblk < pg_end) {
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk = next_pgofs;
			continue;
		}

		if (blk_end && blk_end != map.m_pblk)
			fragmented = true;

		/* record total count of block that we're going to move */
		total += map.m_len;

		blk_end = map.m_pblk + map.m_len;

		map.m_lblk += map.m_len;
	}

	if (!fragmented) {
		total = 0;
		goto out;
	}

	sec_num = DIV_ROUND_UP(total, BLKS_PER_SEC(sbi));

	/*
	 * make sure there are enough free sections for LFS allocation; this
	 * can avoid defragment running in SSR mode when free sections are
	 * allocated intensively
	 */
	if (has_not_enough_free_secs(sbi, 0, sec_num)) {
		err = -EAGAIN;
		goto out;
	}

	map.m_lblk = pg_start;
	map.m_len = pg_end - pg_start;
	total = 0;

	while (map.m_lblk < pg_end) {
		pgoff_t idx;
		int cnt = 0;

do_map:
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto clear_out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk = next_pgofs;
			continue;
		}

		set_inode_flag(inode, FI_DO_DEFRAG);

		idx = map.m_lblk;
		while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
			struct page *page;

			page = f2fs_get_lock_data_page(inode, idx, true);
			if (IS_ERR(page)) {
				err = PTR_ERR(page);
				goto clear_out;
			}

			set_page_dirty(page);
			f2fs_put_page(page, 1);

			idx++;
			cnt++;
			total++;
		}

		map.m_lblk = idx;

		if (idx < pg_end && cnt < blk_per_seg)
			goto do_map;

		clear_inode_flag(inode, FI_DO_DEFRAG);

		err = filemap_fdatawrite(inode->i_mapping);
		if (err)
			goto out;
	}
clear_out:
	clear_inode_flag(inode, FI_DO_DEFRAG);
out:
	inode_unlock(inode);
	if (!err)
		range->len = (u64)total << PAGE_SHIFT;
	return err;
}
static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_defragment range;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
		return -EINVAL;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
							sizeof(range)))
		return -EFAULT;

	/* verify alignment of offset & size */
	if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	if (unlikely((range.start + range.len) >> PAGE_SHIFT >
					sbi->max_file_blocks))
		return -EINVAL;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	err = f2fs_defragment_range(sbi, filp, &range);
	mnt_drop_write_file(filp);

	f2fs_update_time(sbi, REQ_TIME);
	if (err < 0)
		return err;

	if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
							sizeof(range)))
		return -EFAULT;

	return 0;
}
static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
			struct file *file_out, loff_t pos_out, size_t len)
{
	struct inode *src = file_inode(file_in);
	struct inode *dst = file_inode(file_out);
	struct f2fs_sb_info *sbi = F2FS_I_SB(src);
	size_t olen = len, dst_max_i_size = 0;
	size_t dst_osize;
	int ret;

	if (file_in->f_path.mnt != file_out->f_path.mnt ||
				src->i_sb != dst->i_sb)
		return -EXDEV;

	if (unlikely(f2fs_readonly(src->i_sb)))
		return -EROFS;

	if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
		return -EINVAL;

	if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst))
		return -EOPNOTSUPP;

	if (src == dst) {
		if (pos_in == pos_out)
			return 0;
		if (pos_out > pos_in && pos_out < pos_in + len)
			return -EINVAL;
	}

	inode_lock(src);
	if (src != dst) {
		ret = -EBUSY;
		if (!inode_trylock(dst))
			goto out;
	}

	ret = -EINVAL;
	if (pos_in + len > src->i_size || pos_in + len < pos_in)
		goto out_unlock;
	if (len == 0)
		olen = len = src->i_size - pos_in;
	if (pos_in + len == src->i_size)
		len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
	if (len == 0) {
		ret = 0;
		goto out_unlock;
	}

	dst_osize = dst->i_size;
	if (pos_out + olen > dst->i_size)
		dst_max_i_size = pos_out + olen;

	/* verify the end result is block aligned */
	if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_out, F2FS_BLKSIZE))
		goto out_unlock;

	ret = f2fs_convert_inline_inode(src);
	if (ret)
		goto out_unlock;

	ret = f2fs_convert_inline_inode(dst);
	if (ret)
		goto out_unlock;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(src->i_mapping,
					pos_in, pos_in + len);
	if (ret)
		goto out_unlock;

	ret = filemap_write_and_wait_range(dst->i_mapping,
					pos_out, pos_out + len);
	if (ret)
		goto out_unlock;

	f2fs_balance_fs(sbi, true);

	down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
	if (src != dst) {
		ret = -EBUSY;
		if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
			goto out_src;
	}

	f2fs_lock_op(sbi);
	ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
				pos_out >> F2FS_BLKSIZE_BITS,
				len >> F2FS_BLKSIZE_BITS, false);

	if (!ret) {
		if (dst_max_i_size)
			f2fs_i_size_write(dst, dst_max_i_size);
		else if (dst_osize != dst->i_size)
			f2fs_i_size_write(dst, dst_osize);
	}
	f2fs_unlock_op(sbi);

	if (src != dst)
		up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
out_src:
	up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
out_unlock:
	if (src != dst)
		inode_unlock(dst);
out:
	inode_unlock(src);
	return ret;
}
static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
{
	struct f2fs_move_range range;
	struct fd dst;
	int err;

	if (!(filp->f_mode & FMODE_READ) ||
			!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
							sizeof(range)))
		return -EFAULT;

	dst = fdget(range.dst_fd);
	if (!dst.file)
		return -EBADF;

	if (!(dst.file->f_mode & FMODE_WRITE)) {
		err = -EBADF;
		goto err_out;
	}

	err = mnt_want_write_file(filp);
	if (err)
		goto err_out;

	err = f2fs_move_file_range(filp, range.pos_in, dst.file,
					range.pos_out, range.len);

	mnt_drop_write_file(filp);
	if (err)
		goto err_out;

	if (copy_to_user((struct f2fs_move_range __user *)arg,
						&range, sizeof(range)))
		err = -EFAULT;
err_out:
	fdput(dst);
	return err;
}
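/*
 * Illustrative sketch (not part of the original file): F2FS_IOC_MOVE_RANGE
 * donates block-aligned data from one file to another, e.g.:
 *
 *	struct f2fs_move_range mr = {
 *		.dst_fd = dst_fd, .pos_in = 0, .pos_out = 0, .len = 1 << 20,
 *	};
 *	ioctl(src_fd, F2FS_IOC_MOVE_RANGE, &mr);
 */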
static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct sit_info *sm = SIT_I(sbi);
	unsigned int start_segno = 0, end_segno = 0;
	unsigned int dev_start_segno = 0, dev_end_segno = 0;
	struct f2fs_flush_device range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return -EINVAL;

	if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
							sizeof(range)))
		return -EFAULT;

	if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num ||
			__is_large_section(sbi)) {
		f2fs_warn(sbi, "Can't flush %u in %d for segs_per_sec %u != 1",
			  range.dev_num, sbi->s_ndevs, sbi->segs_per_sec);
		return -EINVAL;
	}

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (range.dev_num != 0)
		dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
	dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);

	start_segno = sm->last_victim[FLUSH_DEVICE];
	if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
		start_segno = dev_start_segno;
	end_segno = min(start_segno + range.segments, dev_end_segno);

	while (start_segno < end_segno) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
		sm->last_victim[GC_CB] = end_segno + 1;
		sm->last_victim[GC_GREEDY] = end_segno + 1;
		sm->last_victim[ALLOC_NEXT] = end_segno + 1;
		ret = f2fs_gc(sbi, true, true, start_segno);
		if (ret == -EAGAIN)
			ret = 0;
		else if (ret < 0)
			break;
		start_segno++;
	}
out:
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);

	/* Must validate to set it with SQLite behavior in Android. */
	sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;

	return put_user(sb_feature, (u32 __user *)arg);
}
2725 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
2727 struct dquot *transfer_to[MAXQUOTAS] = {};
2728 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2729 struct super_block *sb = sbi->sb;
2732 transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
2733 if (!IS_ERR(transfer_to[PRJQUOTA])) {
2734 err = __dquot_transfer(inode, transfer_to);
2736 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2737 dqput(transfer_to[PRJQUOTA]);
static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *ipage;
	kprojid_t kprojid;
	int err;

	if (!f2fs_sb_has_project_quota(sbi)) {
		if (projid != F2FS_DEF_PROJID)
			return -EOPNOTSUPP;
		else
			return 0;
	}

	if (!f2fs_has_extra_attr(inode))
		return -EOPNOTSUPP;

	kprojid = make_kprojid(&init_user_ns, (projid_t)projid);

	if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
		return 0;

	err = -EPERM;
	/* Is it quota file? Do not allow user to mess with it */
	if (IS_NOQUOTA(inode))
		return err;

	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize,
								i_projid)) {
		err = -EOVERFLOW;
		f2fs_put_page(ipage, 1);
		return err;
	}
	f2fs_put_page(ipage, 1);

	err = dquot_initialize(inode);
	if (err)
		return err;

	f2fs_lock_op(sbi);
	err = f2fs_transfer_project_quota(inode, kprojid);
	if (err)
		goto out_unlock;

	F2FS_I(inode)->i_projid = kprojid;
	inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
out_unlock:
	f2fs_unlock_op(sbi);
	return err;
}
#else
int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
{
	return 0;
}

static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
{
	if (projid != F2FS_DEF_PROJID)
		return -EOPNOTSUPP;
	return 0;
}
#endif
/* FS_IOC_FSGETXATTR and FS_IOC_FSSETXATTR support */

/*
 * To make a new on-disk f2fs i_flag gettable via FS_IOC_FSGETXATTR and settable
 * via FS_IOC_FSSETXATTR, add an entry for it to f2fs_xflags_map[], and add its
 * FS_XFLAG_* equivalent to F2FS_SUPPORTED_XFLAGS.
 */

static const struct {
	u32 iflag;
	u32 xflag;
} f2fs_xflags_map[] = {
	{ F2FS_SYNC_FL,		FS_XFLAG_SYNC },
	{ F2FS_IMMUTABLE_FL,	FS_XFLAG_IMMUTABLE },
	{ F2FS_APPEND_FL,	FS_XFLAG_APPEND },
	{ F2FS_NODUMP_FL,	FS_XFLAG_NODUMP },
	{ F2FS_NOATIME_FL,	FS_XFLAG_NOATIME },
	{ F2FS_PROJINHERIT_FL,	FS_XFLAG_PROJINHERIT },
};

#define F2FS_SUPPORTED_XFLAGS (		\
		FS_XFLAG_SYNC |		\
		FS_XFLAG_IMMUTABLE |	\
		FS_XFLAG_APPEND |	\
		FS_XFLAG_NODUMP |	\
		FS_XFLAG_NOATIME |	\
		FS_XFLAG_PROJINHERIT)
/* Convert f2fs on-disk i_flags to FS_IOC_FS{GET,SET}XATTR flags */
static inline u32 f2fs_iflags_to_xflags(u32 iflags)
{
	u32 xflags = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++)
		if (iflags & f2fs_xflags_map[i].iflag)
			xflags |= f2fs_xflags_map[i].xflag;

	return xflags;
}
/* Convert FS_IOC_FS{GET,SET}XATTR flags to f2fs on-disk i_flags */
static inline u32 f2fs_xflags_to_iflags(u32 xflags)
{
	u32 iflags = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++)
		if (xflags & f2fs_xflags_map[i].xflag)
			iflags |= f2fs_xflags_map[i].iflag;

	return iflags;
}
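/*
 * Worked example: an inode with F2FS_SYNC_FL | F2FS_NOATIME_FL set in
 * i_flags comes back from f2fs_iflags_to_xflags() as FS_XFLAG_SYNC |
 * FS_XFLAG_NOATIME.  Bits with no entry in f2fs_xflags_map[] are simply
 * dropped in both directions, and f2fs_ioc_fssetxattr() below rejects
 * anything outside F2FS_SUPPORTED_XFLAGS before it ever reaches the
 * converter.
 */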
static void f2fs_fill_fsxattr(struct inode *inode, struct fsxattr *fa)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	simple_fill_fsxattr(fa, f2fs_iflags_to_xflags(fi->i_flags));

	if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
		fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid);
}
static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct fsxattr fa;

	f2fs_fill_fsxattr(inode, &fa);

	if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa)))
		return -EFAULT;
	return 0;
}
static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct fsxattr fa, old_fa;
	u32 iflags;
	int err;

	if (copy_from_user(&fa, (struct fsxattr __user *)arg, sizeof(fa)))
		return -EFAULT;

	/* Make sure caller has proper permission */
	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (fa.fsx_xflags & ~F2FS_SUPPORTED_XFLAGS)
		return -EOPNOTSUPP;

	iflags = f2fs_xflags_to_iflags(fa.fsx_xflags);
	if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
		return -EOPNOTSUPP;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	inode_lock(inode);

	f2fs_fill_fsxattr(inode, &old_fa);
	err = vfs_ioc_fssetxattr_check(inode, &old_fa, &fa);
	if (err)
		goto out;

	err = f2fs_setflags_common(inode, iflags,
			f2fs_xflags_to_iflags(F2FS_SUPPORTED_XFLAGS));
	if (err)
		goto out;

	err = f2fs_ioc_setproject(filp, fa.fsx_projid);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return err;
}
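/*
 * Example (userspace, illustrative only): tag a directory with project
 * id 42 and let new children inherit it.  struct fsxattr and the
 * FS_IOC_FS{GET,SET}XATTR commands come from <linux/fs.h>; project quota
 * must be enabled on the filesystem for the id to stick.
 *
 *	struct fsxattr fa;
 *
 *	ioctl(dirfd, FS_IOC_FSGETXATTR, &fa);
 *	fa.fsx_xflags |= FS_XFLAG_PROJINHERIT;
 *	fa.fsx_projid = 42;
 *	ioctl(dirfd, FS_IOC_FSSETXATTR, &fa);
 */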
int f2fs_pin_file_control(struct inode *inode, bool inc)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	/* Use i_gc_failures for normal file as a risk signal. */
	if (inc)
		f2fs_i_gc_failures_write(inode,
				fi->i_gc_failures[GC_FAILURE_PIN] + 1);

	if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
		f2fs_warn(sbi, "%s: Enable GC = ino %lx after %x GC trials",
			  __func__, inode->i_ino,
			  fi->i_gc_failures[GC_FAILURE_PIN]);
		clear_inode_flag(inode, FI_PIN_FILE);
		return -EAGAIN;
	}
	return 0;
}
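/*
 * A pinned file is force-unpinned only after repeated GC failures, so a
 * runaway pin cannot block cleaning forever.  The cutoff,
 * sbi->gc_pin_file_threshold, is tunable at runtime; on mainline kernels
 * the sysfs knob is gc_pin_file_thresh under /sys/fs/f2fs/<dev>/ (name
 * assumed here; check your kernel's Documentation/ABI).
 */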
static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	__u32 pin;
	int ret = 0;

	if (get_user(pin, (__u32 __user *)arg))
		return -EFAULT;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (f2fs_readonly(F2FS_I_SB(inode)->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_should_update_outplace(inode, NULL)) {
		ret = -EINVAL;
		goto out;
	}

	if (!pin) {
		clear_inode_flag(inode, FI_PIN_FILE);
		f2fs_i_gc_failures_write(inode, 0);
		goto done;
	}

	if (f2fs_pin_file_control(inode, false)) {
		ret = -EAGAIN;
		goto out;
	}
	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	set_inode_flag(inode, FI_PIN_FILE);
	ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
done:
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	__u32 pin = 0;

	if (is_inode_flag_set(inode, FI_PIN_FILE))
		pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
	return put_user(pin, (u32 __user *)arg);
}
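/*
 * Example (userspace, illustrative only): pin a file so GC will not
 * migrate its blocks, then read back how many GC trials it has survived.
 * Ioctl numbers assumed from fs/f2fs/f2fs.h.
 *
 *	__u32 pin = 1, failures = 0;
 *
 *	ioctl(fd, F2FS_IOC_SET_PIN_FILE, &pin);
 *	ioctl(fd, F2FS_IOC_GET_PIN_FILE, &failures);
 *
 * On success, set returns the current GC-failure count, and pinning is
 * refused for files that must be updated out-of-place.
 */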
int f2fs_precache_extents(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_map_blocks map;
	pgoff_t m_next_extent;
	loff_t end;
	int err = 0;

	if (is_inode_flag_set(inode, FI_NO_EXTENT))
		return -EOPNOTSUPP;

	map.m_lblk = 0;
	map.m_next_pgofs = NULL;
	map.m_next_extent = &m_next_extent;
	map.m_seg_type = NO_CHECK_TYPE;
	map.m_may_create = false;
	end = F2FS_I_SB(inode)->max_file_blocks;

	while (map.m_lblk < end) {
		map.m_len = end - map.m_lblk;

		down_write(&fi->i_gc_rwsem[WRITE]);
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
		up_write(&fi->i_gc_rwsem[WRITE]);
		if (err)
			return err;

		map.m_lblk = m_next_extent;
	}

	return err;
}
static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
{
	return f2fs_precache_extents(file_inode(filp));
}
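/*
 * Example (userspace, illustrative only): the precache ioctl takes no
 * argument; it simply walks the on-disk block mappings into the extent
 * cache so later lookups avoid per-block node-page reads.
 *
 *	ioctl(fd, F2FS_IOC_PRECACHE_EXTENTS, 0);
 */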
static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
	__u64 block_count;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (copy_from_user(&block_count, (void __user *)arg,
			   sizeof(block_count)))
		return -EFAULT;

	ret = f2fs_resize_fs(sbi, block_count);

	return ret;
}
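/*
 * Example (userspace, illustrative only): resize to a new total block
 * count (f2fs blocks are 4 KiB).  At this point in f2fs's history
 * f2fs_resize_fs() is expected to support shrinking only; growing came
 * later.
 *
 *	__u64 blocks = target_bytes >> 12;
 *
 *	ioctl(fd, F2FS_IOC_RESIZE_FS, &blocks);
 */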
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
		return -EIO;
	ret = f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp)));
	if (ret)
		return ret;

	switch (cmd) {
	case F2FS_IOC_GETFLAGS:
		return f2fs_ioc_getflags(filp, arg);
	case F2FS_IOC_SETFLAGS:
		return f2fs_ioc_setflags(filp, arg);
	case F2FS_IOC_GETVERSION:
		return f2fs_ioc_getversion(filp, arg);
	case F2FS_IOC_START_ATOMIC_WRITE:
		return f2fs_ioc_start_atomic_write(filp);
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
		return f2fs_ioc_commit_atomic_write(filp);
	case F2FS_IOC_START_VOLATILE_WRITE:
		return f2fs_ioc_start_volatile_write(filp);
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
		return f2fs_ioc_release_volatile_write(filp);
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
		return f2fs_ioc_abort_volatile_write(filp);
	case F2FS_IOC_SHUTDOWN:
		return f2fs_ioc_shutdown(filp, arg);
	case FITRIM:
		return f2fs_ioc_fitrim(filp, arg);
	case F2FS_IOC_SET_ENCRYPTION_POLICY:
		return f2fs_ioc_set_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_POLICY:
		return f2fs_ioc_get_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
		return f2fs_ioc_get_encryption_pwsalt(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT:
		return f2fs_ioc_gc(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
		return f2fs_ioc_gc_range(filp, arg);
	case F2FS_IOC_WRITE_CHECKPOINT:
		return f2fs_ioc_write_checkpoint(filp, arg);
	case F2FS_IOC_DEFRAGMENT:
		return f2fs_ioc_defragment(filp, arg);
	case F2FS_IOC_MOVE_RANGE:
		return f2fs_ioc_move_range(filp, arg);
	case F2FS_IOC_FLUSH_DEVICE:
		return f2fs_ioc_flush_device(filp, arg);
	case F2FS_IOC_GET_FEATURES:
		return f2fs_ioc_get_features(filp, arg);
	case F2FS_IOC_FSGETXATTR:
		return f2fs_ioc_fsgetxattr(filp, arg);
	case F2FS_IOC_FSSETXATTR:
		return f2fs_ioc_fssetxattr(filp, arg);
	case F2FS_IOC_GET_PIN_FILE:
		return f2fs_ioc_get_pin_file(filp, arg);
	case F2FS_IOC_SET_PIN_FILE:
		return f2fs_ioc_set_pin_file(filp, arg);
	case F2FS_IOC_PRECACHE_EXTENTS:
		return f2fs_ioc_precache_extents(filp, arg);
	case F2FS_IOC_RESIZE_FS:
		return f2fs_ioc_resize_fs(filp, arg);
	default:
		return -ENOTTY;
	}
}
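/*
 * Every command, including read-only ones, goes through the two gates at
 * the top of the dispatcher: -EIO once a checkpoint error has been
 * detected, and the checkpoint-ready check (-ENOSPC) while checkpointing
 * is disabled and free space is too low to re-enable it.
 */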
static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	ssize_t ret;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
		ret = -EIO;
		goto out;
	}

	if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT)) {
		ret = -EINVAL;
		goto out;
	}

	if (!inode_trylock(inode)) {
		if (iocb->ki_flags & IOCB_NOWAIT) {
			ret = -EAGAIN;
			goto out;
		}
		inode_lock(inode);
	}

	ret = generic_write_checks(iocb, from);
	if (ret > 0) {
		bool preallocated = false;
		size_t target_size = 0;
		int err;

		if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
			set_inode_flag(inode, FI_NO_PREALLOC);

		if (iocb->ki_flags & IOCB_NOWAIT) {
			if (!f2fs_overwrite_io(inode, iocb->ki_pos,
						iov_iter_count(from)) ||
				f2fs_has_inline_data(inode) ||
				f2fs_force_buffered_io(inode, iocb, from)) {
				clear_inode_flag(inode, FI_NO_PREALLOC);
				inode_unlock(inode);
				ret = -EAGAIN;
				goto out;
			}
		} else {
			preallocated = true;
			target_size = iocb->ki_pos + iov_iter_count(from);

			err = f2fs_preallocate_blocks(iocb, from);
			if (err) {
				clear_inode_flag(inode, FI_NO_PREALLOC);
				inode_unlock(inode);
				ret = err;
				goto out;
			}
		}
		ret = __generic_file_write_iter(iocb, from);
		clear_inode_flag(inode, FI_NO_PREALLOC);

		/* if the write failed or fell short, release the blocks
		 * preallocated above */
		if (preallocated && i_size_read(inode) < target_size)
			f2fs_truncate(inode);

		if (ret > 0)
			f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
	}
	inode_unlock(inode);
out:
	trace_f2fs_file_write_iter(inode, iocb->ki_pos,
					iov_iter_count(from), ret);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
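/*
 * Two details above are easy to miss.  First, if the user buffer cannot
 * be faulted in, FI_NO_PREALLOC is set so we do not allocate blocks for
 * a copy that is about to transfer zero bytes.  Second, an IOCB_NOWAIT
 * write only proceeds when it is a pure in-place overwrite of already
 * allocated blocks; anything that would require allocation, inline-data
 * conversion, or falling back to buffered I/O bails out with -EAGAIN.
 */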
#ifdef CONFIG_COMPAT
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case F2FS_IOC32_GETFLAGS:
		cmd = F2FS_IOC_GETFLAGS;
		break;
	case F2FS_IOC32_SETFLAGS:
		cmd = F2FS_IOC_SETFLAGS;
		break;
	case F2FS_IOC32_GETVERSION:
		cmd = F2FS_IOC_GETVERSION;
		break;
	case F2FS_IOC_START_ATOMIC_WRITE:
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
	case F2FS_IOC_START_VOLATILE_WRITE:
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
	case F2FS_IOC_SHUTDOWN:
	case F2FS_IOC_SET_ENCRYPTION_POLICY:
	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
	case F2FS_IOC_GET_ENCRYPTION_POLICY:
	case F2FS_IOC_GARBAGE_COLLECT:
	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
	case F2FS_IOC_WRITE_CHECKPOINT:
	case F2FS_IOC_DEFRAGMENT:
	case F2FS_IOC_MOVE_RANGE:
	case F2FS_IOC_FLUSH_DEVICE:
	case F2FS_IOC_GET_FEATURES:
	case F2FS_IOC_FSGETXATTR:
	case F2FS_IOC_FSSETXATTR:
	case F2FS_IOC_GET_PIN_FILE:
	case F2FS_IOC_SET_PIN_FILE:
	case F2FS_IOC_PRECACHE_EXTENTS:
	case F2FS_IOC_RESIZE_FS:
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif
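/*
 * Only the three FS_IOC32_* translations are needed: the generic
 * GETFLAGS/SETFLAGS/GETVERSION encodings are defined in terms of 'long',
 * whose size differs between 32-bit and 64-bit ABIs.  The native
 * F2FS_IOC_* commands use fixed-width argument types, so they pass
 * through unchanged apart from compat_ptr() rewriting the pointer.
 */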
const struct file_operations f2fs_file_operations = {
	.llseek		= f2fs_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= f2fs_file_write_iter,
	.open		= f2fs_file_open,
	.release	= f2fs_release_file,
	.mmap		= f2fs_file_mmap,
	.flush		= f2fs_file_flush,
	.fsync		= f2fs_sync_file,
	.fallocate	= f2fs_fallocate,
	.unlocked_ioctl	= f2fs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= f2fs_compat_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
};