1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  linux/fs/ext4/file.c
4  *
5  * Copyright (C) 1992, 1993, 1994, 1995
6  * Remy Card (card@masi.ibp.fr)
7  * Laboratoire MASI - Institut Blaise Pascal
8  * Universite Pierre et Marie Curie (Paris VI)
9  *
10  *  from
11  *
12  *  linux/fs/minix/file.c
13  *
14  *  Copyright (C) 1991, 1992  Linus Torvalds
15  *
16  *  ext4 fs regular file handling primitives
17  *
18  *  64-bit file support on 64-bit platforms by Jakub Jelinek
19  *      (jj@sunsite.ms.mff.cuni.cz)
20  */
21
22 #include <linux/time.h>
23 #include <linux/fs.h>
24 #include <linux/iomap.h>
25 #include <linux/mount.h>
26 #include <linux/path.h>
27 #include <linux/dax.h>
28 #include <linux/quotaops.h>
29 #include <linux/pagevec.h>
30 #include <linux/uio.h>
31 #include <linux/mman.h>
32 #include "ext4.h"
33 #include "ext4_jbd2.h"
34 #include "xattr.h"
35 #include "acl.h"
36
37 #ifdef CONFIG_FS_DAX
38 static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
39 {
40         struct inode *inode = file_inode(iocb->ki_filp);
41         ssize_t ret;
42
43         if (!inode_trylock_shared(inode)) {
44                 if (iocb->ki_flags & IOCB_NOWAIT)
45                         return -EAGAIN;
46                 inode_lock_shared(inode);
47         }
48         /*
49          * Recheck under inode lock - at this point we are sure it cannot
50          * change anymore
51          */
52         if (!IS_DAX(inode)) {
53                 inode_unlock_shared(inode);
54                 /* Fallback to buffered IO in case we cannot support DAX */
55                 return generic_file_read_iter(iocb, to);
56         }
57         ret = dax_iomap_rw(iocb, to, &ext4_iomap_ops);
58         inode_unlock_shared(inode);
59
60         file_accessed(iocb->ki_filp);
61         return ret;
62 }
63 #endif
64
65 static ssize_t ext4_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
66 {
67         if (unlikely(ext4_forced_shutdown(EXT4_SB(file_inode(iocb->ki_filp)->i_sb))))
68                 return -EIO;
69
70         if (!iov_iter_count(to))
71                 return 0; /* skip atime */
72
73 #ifdef CONFIG_FS_DAX
74         if (IS_DAX(file_inode(iocb->ki_filp)))
75                 return ext4_dax_read_iter(iocb, to);
76 #endif
77         return generic_file_read_iter(iocb, to);
78 }
79
80 /*
81  * Called when an inode is released. Note that this is different
82  * from ext4_file_open: open gets called at every open, but release
83  * gets called only when /all/ the files are closed.
84  */
85 static int ext4_release_file(struct inode *inode, struct file *filp)
86 {
87         if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
88                 ext4_alloc_da_blocks(inode);
89                 ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
90         }
91         /* if we are the last writer on the inode, drop the block reservation */
92         if ((filp->f_mode & FMODE_WRITE) &&
93                         (atomic_read(&inode->i_writecount) == 1) &&
94                         !EXT4_I(inode)->i_reserved_data_blocks)
95         {
96                 down_write(&EXT4_I(inode)->i_data_sem);
97                 ext4_discard_preallocations(inode);
98                 up_write(&EXT4_I(inode)->i_data_sem);
99         }
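        /*
         * If htree readdir state was cached in ->private_data (only
         * possible for an indexed directory), free it now that the
         * last reference to this open file is going away.
         */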
100         if (is_dx(inode) && filp->private_data)
101                 ext4_htree_free_dir_info(filp->private_data);
102
103         return 0;
104 }
105
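/*
 * Wait until every outstanding unwritten-extent conversion on this inode
 * has completed, i.e. until the i_unwritten count drops to zero.
 */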
106 static void ext4_unwritten_wait(struct inode *inode)
107 {
108         wait_queue_head_t *wq = ext4_ioend_wq(inode);
109
110         wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0));
111 }
112
113 /*
114  * This tests whether the IO in question is block-aligned or not.
115  * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
116  * are converted to written only after the IO is complete.  Until they are
117  * mapped, these blocks appear as holes, so dio_zero_block() will assume that
118  * it needs to zero out portions of the start and/or end block.  If 2 AIO
119  * threads are at work on the same unwritten block, they must be synchronized
120  * or one thread will zero the other's data, causing corruption.
121  */
122 static int
123 ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
124 {
125         struct super_block *sb = inode->i_sb;
126         int blockmask = sb->s_blocksize - 1;
127
128         if (pos >= i_size_read(inode))
129                 return 0;
130
131         if ((pos | iov_iter_alignment(from)) & blockmask)
132                 return 1;
133
134         return 0;
135 }
136
137 /* Is IO overwriting allocated and initialized blocks? */
138 static bool ext4_overwrite_io(struct inode *inode, loff_t pos, loff_t len)
139 {
140         struct ext4_map_blocks map;
141         unsigned int blkbits = inode->i_blkbits;
142         int err, blklen;
143
144         if (pos + len > i_size_read(inode))
145                 return false;
146
147         map.m_lblk = pos >> blkbits;
148         map.m_len = EXT4_MAX_BLOCKS(len, pos, blkbits);
149         blklen = map.m_len;
150
151         err = ext4_map_blocks(NULL, inode, &map, 0);
152         /*
153          * 'err == blklen' means that all of the blocks have been allocated,
154          * regardless of whether they have been initialized or not. To exclude
155          * unwritten extents, we need to check m_flags.
156          */
157         return err == blklen && (map.m_flags & EXT4_MAP_MAPPED);
158 }
159
160 static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
161 {
162         struct inode *inode = file_inode(iocb->ki_filp);
163         ssize_t ret;
164
165         ret = generic_write_checks(iocb, from);
166         if (ret <= 0)
167                 return ret;
168         /*
169          * If we have encountered a bitmap-format file, the size limit
170          * is smaller than s_maxbytes, which is for extent-mapped files.
171          */
172         if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
173                 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
174
175                 if (iocb->ki_pos >= sbi->s_bitmap_maxbytes)
176                         return -EFBIG;
177                 iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
178         }
179         return iov_iter_count(from);
180 }
181
182 #ifdef CONFIG_FS_DAX
183 static ssize_t
184 ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
185 {
186         struct inode *inode = file_inode(iocb->ki_filp);
187         ssize_t ret;
188
189         if (!inode_trylock(inode)) {
190                 if (iocb->ki_flags & IOCB_NOWAIT)
191                         return -EAGAIN;
192                 inode_lock(inode);
193         }
194         ret = ext4_write_checks(iocb, from);
195         if (ret <= 0)
196                 goto out;
197         ret = file_remove_privs(iocb->ki_filp);
198         if (ret)
199                 goto out;
200         ret = file_update_time(iocb->ki_filp);
201         if (ret)
202                 goto out;
203
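        /*
         * DAX writes go synchronously through ext4_iomap_ops straight to
         * the backing persistent memory; the page cache is not involved.
         */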
204         ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops);
205 out:
206         inode_unlock(inode);
207         if (ret > 0)
208                 ret = generic_write_sync(iocb, ret);
209         return ret;
210 }
211 #endif
212
213 static ssize_t
214 ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
215 {
216         struct inode *inode = file_inode(iocb->ki_filp);
217         int o_direct = iocb->ki_flags & IOCB_DIRECT;
218         int unaligned_aio = 0;
219         int overwrite = 0;
220         ssize_t ret;
221
222         if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
223                 return -EIO;
224
225 #ifdef CONFIG_FS_DAX
226         if (IS_DAX(inode))
227                 return ext4_dax_write_iter(iocb, from);
228 #endif
229         if (!o_direct && (iocb->ki_flags & IOCB_NOWAIT))
230                 return -EOPNOTSUPP;
231
232         if (!inode_trylock(inode)) {
233                 if (iocb->ki_flags & IOCB_NOWAIT)
234                         return -EAGAIN;
235                 inode_lock(inode);
236         }
237
238         ret = ext4_write_checks(iocb, from);
239         if (ret <= 0)
240                 goto out;
241
242         /*
243          * Unaligned direct AIO must be serialized among each other as zeroing
244          * of partial blocks of two competing unaligned AIOs can result in data
245          * corruption.
246          */
247         if (o_direct && ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
248             !is_sync_kiocb(iocb) &&
249             ext4_unaligned_aio(inode, from, iocb->ki_pos)) {
250                 unaligned_aio = 1;
251                 ext4_unwritten_wait(inode);
252         }
253
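        /*
         * The overwrite hint is handed to ext4's direct I/O path via
         * iocb->private: a confirmed overwrite of allocated, written
         * blocks lets that path avoid the locking and unwritten-extent
         * handling needed for allocating writes.
         */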
254         iocb->private = &overwrite;
255         /* Check whether we do a DIO overwrite or not */
256         if (o_direct && !unaligned_aio) {
257                 if (ext4_overwrite_io(inode, iocb->ki_pos, iov_iter_count(from))) {
258                         if (ext4_should_dioread_nolock(inode))
259                                 overwrite = 1;
260                 } else if (iocb->ki_flags & IOCB_NOWAIT) {
261                         ret = -EAGAIN;
262                         goto out;
263                 }
264         }
265
266         ret = __generic_file_write_iter(iocb, from);
267         inode_unlock(inode);
268
269         if (ret > 0)
270                 ret = generic_write_sync(iocb, ret);
271
272         return ret;
273
274 out:
275         inode_unlock(inode);
276         return ret;
277 }
278
279 #ifdef CONFIG_FS_DAX
280 static int ext4_dax_huge_fault(struct vm_fault *vmf,
281                 enum page_entry_size pe_size)
282 {
283         int result, error = 0;
284         int retries = 0;
285         handle_t *handle = NULL;
286         struct inode *inode = file_inode(vmf->vma->vm_file);
287         struct super_block *sb = inode->i_sb;
288
289         /*
290          * We have to distinguish real writes from writes which will result in a
291          * COW page; COW writes should *not* poke the journal (the file will not
292          * be changed). Doing so would cause unintended failures when mounted
293          * read-only.
294          *
295          * We check for VM_SHARED rather than vmf->cow_page since the latter is
296          * unset for pe_size != PE_SIZE_PTE (i.e. only in do_cow_fault); for
297          * other sizes, dax_iomap_fault will handle splitting / fallback so that
298          * we eventually come back with a COW page.
299          */
300         bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
301                 (vmf->vma->vm_flags & VM_SHARED);
302         pfn_t pfn;
303
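        /*
         * A shared, writeable fault may need to allocate blocks, so start
         * a journal handle with credits for a full data transaction before
         * calling into the DAX fault handler.
         */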
304         if (write) {
305                 sb_start_pagefault(sb);
306                 file_update_time(vmf->vma->vm_file);
307                 down_read(&EXT4_I(inode)->i_mmap_sem);
308 retry:
309                 handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
310                                                EXT4_DATA_TRANS_BLOCKS(sb));
311                 if (IS_ERR(handle)) {
312                         up_read(&EXT4_I(inode)->i_mmap_sem);
313                         sb_end_pagefault(sb);
314                         return VM_FAULT_SIGBUS;
315                 }
316         } else {
317                 down_read(&EXT4_I(inode)->i_mmap_sem);
318         }
319         result = dax_iomap_fault(vmf, pe_size, &pfn, &error, &ext4_iomap_ops);
320         if (write) {
321                 ext4_journal_stop(handle);
322
323                 if ((result & VM_FAULT_ERROR) && error == -ENOSPC &&
324                     ext4_should_retry_alloc(sb, &retries))
325                         goto retry;
326                 /* Handling synchronous page fault? */
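                /*
                 * For MAP_SYNC mappings the writeable PFN must not be
                 * installed until the metadata for the faulted range is
                 * durable; dax_finish_sync_fault() syncs that range first
                 * and then inserts the mapping.
                 */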
327                 if (result & VM_FAULT_NEEDDSYNC)
328                         result = dax_finish_sync_fault(vmf, pe_size, pfn);
329                 up_read(&EXT4_I(inode)->i_mmap_sem);
330                 sb_end_pagefault(sb);
331         } else {
332                 up_read(&EXT4_I(inode)->i_mmap_sem);
333         }
334
335         return result;
336 }
337
338 static int ext4_dax_fault(struct vm_fault *vmf)
339 {
340         return ext4_dax_huge_fault(vmf, PE_SIZE_PTE);
341 }
342
343 static const struct vm_operations_struct ext4_dax_vm_ops = {
344         .fault          = ext4_dax_fault,
345         .huge_fault     = ext4_dax_huge_fault,
346         .page_mkwrite   = ext4_dax_fault,
347         .pfn_mkwrite    = ext4_dax_fault,
348 };
349 #else
350 #define ext4_dax_vm_ops ext4_file_vm_ops
351 #endif
352
353 static const struct vm_operations_struct ext4_file_vm_ops = {
354         .fault          = ext4_filemap_fault,
355         .map_pages      = filemap_map_pages,
356         .page_mkwrite   = ext4_page_mkwrite,
357 };
358
359 static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
360 {
361         struct inode *inode = file->f_mapping->host;
362
363         if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
364                 return -EIO;
365
366         /*
367          * We don't support synchronous mappings for non-DAX files. At least
368          * until someone comes up with a sensible use case.
369          */
370         if (!IS_DAX(file_inode(file)) && (vma->vm_flags & VM_SYNC))
371                 return -EOPNOTSUPP;
372
373         file_accessed(file);
374         if (IS_DAX(file_inode(file))) {
375                 vma->vm_ops = &ext4_dax_vm_ops;
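                /*
                 * DAX mappings insert raw PFNs that may have no struct page
                 * and can be mapped with PMD-sized entries, hence
                 * VM_MIXEDMAP plus the VM_HUGEPAGE hint.
                 */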
376                 vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
377         } else {
378                 vma->vm_ops = &ext4_file_vm_ops;
379         }
380         return 0;
381 }
382
383 static int ext4_file_open(struct inode * inode, struct file * filp)
384 {
385         struct super_block *sb = inode->i_sb;
386         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
387         struct vfsmount *mnt = filp->f_path.mnt;
388         struct path path;
389         char buf[64], *cp;
390         int ret;
391
392         if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
393                 return -EIO;
394
395         if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
396                      !sb_rdonly(sb))) {
397                 sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
398                 /*
399                  * Sample where the filesystem has been mounted and
400                  * store it in the superblock for sysadmin convenience
401                  * when trying to sort through large numbers of block
402                  * devices or filesystem images.
403                  */
404                 memset(buf, 0, sizeof(buf));
405                 path.mnt = mnt;
406                 path.dentry = mnt->mnt_root;
407                 cp = d_path(&path, buf, sizeof(buf));
408                 if (!IS_ERR(cp)) {
409                         handle_t *handle;
410                         int err;
411
412                         handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
413                         if (IS_ERR(handle))
414                                 return PTR_ERR(handle);
415                         BUFFER_TRACE(sbi->s_sbh, "get_write_access");
416                         err = ext4_journal_get_write_access(handle, sbi->s_sbh);
417                         if (err) {
418                                 ext4_journal_stop(handle);
419                                 return err;
420                         }
421                         strlcpy(sbi->s_es->s_last_mounted, cp,
422                                 sizeof(sbi->s_es->s_last_mounted));
423                         ext4_handle_dirty_super(handle, sb);
424                         ext4_journal_stop(handle);
425                 }
426         }
427
428         ret = fscrypt_file_open(inode, filp);
429         if (ret)
430                 return ret;
431
432         /*
433          * Set up the jbd2_inode if we are opening the inode for
434          * writing and the journal is present
435          */
436         if (filp->f_mode & FMODE_WRITE) {
437                 ret = ext4_inode_attach_jinode(inode);
438                 if (ret < 0)
439                         return ret;
440         }
441
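        /*
         * Advertise support for non-blocking I/O (RWF_NOWAIT): the read
         * and write paths above honour IOCB_NOWAIT by returning -EAGAIN
         * rather than sleeping on the inode lock.
         */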
442         filp->f_mode |= FMODE_NOWAIT;
443         return dquot_file_open(inode, filp);
444 }
445
446 /*
447  * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
448  * by calling generic_file_llseek_size() with the appropriate maxbytes
449  * value for each.
450  */
451 loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
452 {
453         struct inode *inode = file->f_mapping->host;
454         loff_t maxbytes;
455
456         if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
457                 maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
458         else
459                 maxbytes = inode->i_sb->s_maxbytes;
460
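        /*
         * SEEK_HOLE and SEEK_DATA walk the extent map through the iomap
         * helpers under the shared inode lock; the remaining whence values
         * are handled by the generic helper, bounded by maxbytes.
         */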
461         switch (whence) {
462         default:
463                 return generic_file_llseek_size(file, offset, whence,
464                                                 maxbytes, i_size_read(inode));
465         case SEEK_HOLE:
466                 inode_lock_shared(inode);
467                 offset = iomap_seek_hole(inode, offset, &ext4_iomap_ops);
468                 inode_unlock_shared(inode);
469                 break;
470         case SEEK_DATA:
471                 inode_lock_shared(inode);
472                 offset = iomap_seek_data(inode, offset, &ext4_iomap_ops);
473                 inode_unlock_shared(inode);
474                 break;
475         }
476
477         if (offset < 0)
478                 return offset;
479         return vfs_setpos(file, offset, maxbytes);
480 }
481
482 const struct file_operations ext4_file_operations = {
483         .llseek         = ext4_llseek,
484         .read_iter      = ext4_file_read_iter,
485         .write_iter     = ext4_file_write_iter,
486         .unlocked_ioctl = ext4_ioctl,
487 #ifdef CONFIG_COMPAT
488         .compat_ioctl   = ext4_compat_ioctl,
489 #endif
490         .mmap           = ext4_file_mmap,
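        /*
         * Declaring MAP_SYNC here lets the core mmap code accept
         * MAP_SHARED_VALIDATE | MAP_SYNC requests for ext4 files.
         */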
491         .mmap_supported_flags = MAP_SYNC,
492         .open           = ext4_file_open,
493         .release        = ext4_release_file,
494         .fsync          = ext4_sync_file,
495         .get_unmapped_area = thp_get_unmapped_area,
496         .splice_read    = generic_file_splice_read,
497         .splice_write   = iter_file_splice_write,
498         .fallocate      = ext4_fallocate,
499 };
500
501 const struct inode_operations ext4_file_inode_operations = {
502         .setattr        = ext4_setattr,
503         .getattr        = ext4_file_getattr,
504         .listxattr      = ext4_listxattr,
505         .get_acl        = ext4_get_acl,
506         .set_acl        = ext4_set_acl,
507         .fiemap         = ext4_fiemap,
508 };
509