// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>
#include <linux/ceph/striper.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/falloc.h>
#include <linux/iversion.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include "io.h"

static __le32 ceph_flags_sys2wire(u32 flags)
{
        u32 wire_flags = 0;

        switch (flags & O_ACCMODE) {
        case O_RDONLY:
                wire_flags |= CEPH_O_RDONLY;
                break;
        case O_WRONLY:
                wire_flags |= CEPH_O_WRONLY;
                break;
        case O_RDWR:
                wire_flags |= CEPH_O_RDWR;
                break;
        }

        flags &= ~O_ACCMODE;

#define ceph_sys2wire(a) if (flags & a) { wire_flags |= CEPH_##a; flags &= ~a; }

        ceph_sys2wire(O_CREAT);
        ceph_sys2wire(O_EXCL);
        ceph_sys2wire(O_TRUNC);
        ceph_sys2wire(O_DIRECTORY);
        ceph_sys2wire(O_NOFOLLOW);

#undef ceph_sys2wire

        if (flags)
                dout("unused open flags: %x\n", flags);

        return cpu_to_le32(wire_flags);
}
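
/*
 * Illustrative example (editor's note): flags = O_RDWR | O_CREAT |
 * O_TRUNC translates to CEPH_O_RDWR | CEPH_O_CREAT | CEPH_O_TRUNC on
 * the wire.  Bits with no CEPH_* counterpart (e.g. O_NOATIME) survive
 * in "flags" and are only reported by the dout() above.
 */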

/*
 * Ceph file operations
 *
 * Implement basic open/close functionality, and implement
 * read/write.
 *
 * We implement three modes of file I/O:
 *  - buffered uses the generic_file_{read,write}_iter helpers
 *
 *  - synchronous is used when there is multi-client read/write
 *    sharing, avoids the page cache, and synchronously waits for an
 *    ack from the OSD.
 *
 *  - direct io uses a variant of the sync path that references
 *    user pages directly.
 *
 * fsync() flushes and waits on dirty pages, but just queues metadata
 * for writeback: since the MDS can recover size and mtime there is no
 * need to wait for MDS acknowledgement.
 */
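
/*
 * Editor's sketch of how a read is routed (see ceph_read_iter() below):
 * with CEPH_CAP_FILE_CACHE or CEPH_CAP_FILE_LAZYIO caps held, and
 * neither O_DIRECT nor CEPH_F_SYNC in effect, reads go through
 * generic_file_read_iter(); otherwise they fall back to
 * ceph_sync_read() or, for O_DIRECT, ceph_direct_read_write().
 */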

/*
 * How many pages to get in one call to iov_iter_get_pages().  This
 * determines the size of the on-stack array used as a buffer.
 */
#define ITER_GET_BVECS_PAGES    64

static ssize_t __iter_get_bvecs(struct iov_iter *iter, size_t maxsize,
                                struct bio_vec *bvecs)
{
        size_t size = 0;
        int bvec_idx = 0;

        if (maxsize > iov_iter_count(iter))
                maxsize = iov_iter_count(iter);

        while (size < maxsize) {
                struct page *pages[ITER_GET_BVECS_PAGES];
                ssize_t bytes;
                size_t start;
                int idx = 0;

                bytes = iov_iter_get_pages(iter, pages, maxsize - size,
                                           ITER_GET_BVECS_PAGES, &start);
                if (bytes < 0)
                        return size ?: bytes;

                iov_iter_advance(iter, bytes);
                size += bytes;

                for ( ; bytes; idx++, bvec_idx++) {
                        struct bio_vec bv = {
                                .bv_page = pages[idx],
                                .bv_len = min_t(int, bytes, PAGE_SIZE - start),
                                .bv_offset = start,
                        };

                        bvecs[bvec_idx] = bv;
                        bytes -= bv.bv_len;
                        start = 0;
                }
        }

        return size;
}
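
/*
 * Worked example (illustrative, assuming 4K pages): maxsize = 10000
 * with the iter pointing 100 bytes into its first page pins three
 * pages and builds
 *
 *   bvecs[0] = { page0, .bv_offset = 100, .bv_len = 3996 }
 *   bvecs[1] = { page1, .bv_offset = 0,   .bv_len = 4096 }
 *   bvecs[2] = { page2, .bv_offset = 0,   .bv_len = 1908 }
 *
 * Only the first bvec of each iov_iter_get_pages() batch carries a
 * non-zero offset, since "start" is reset to 0 after the first page.
 */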

/*
 * iov_iter_get_pages() only considers one iov_iter segment, no matter
 * what maxsize or maxpages are given.  For ITER_BVEC that is a single
 * page.
 *
 * Attempt to get up to @maxsize bytes worth of pages from @iter.
 * Return the number of bytes in the created bio_vec array, or an error.
 */
static ssize_t iter_get_bvecs_alloc(struct iov_iter *iter, size_t maxsize,
                                    struct bio_vec **bvecs, int *num_bvecs)
{
        struct bio_vec *bv;
        size_t orig_count = iov_iter_count(iter);
        ssize_t bytes;
        int npages;

        iov_iter_truncate(iter, maxsize);
        npages = iov_iter_npages(iter, INT_MAX);
        iov_iter_reexpand(iter, orig_count);

        /*
         * __iter_get_bvecs() may populate only part of the array -- zero it
         * out.
         */
        bv = kvmalloc_array(npages, sizeof(*bv), GFP_KERNEL | __GFP_ZERO);
        if (!bv)
                return -ENOMEM;

        bytes = __iter_get_bvecs(iter, maxsize, bv);
        if (bytes < 0) {
                /*
                 * No pages were pinned -- just free the array.
                 */
                kvfree(bv);
                return bytes;
        }

        *bvecs = bv;
        *num_bvecs = npages;
        return bytes;
}

static void put_bvecs(struct bio_vec *bvecs, int num_bvecs, bool should_dirty)
{
        int i;

        for (i = 0; i < num_bvecs; i++) {
                if (bvecs[i].bv_page) {
                        if (should_dirty)
                                set_page_dirty_lock(bvecs[i].bv_page);
                        put_page(bvecs[i].bv_page);
                }
        }
        kvfree(bvecs);
}

/*
 * Prepare an open request.  Preallocate ceph_cap to avoid an
 * inopportune ENOMEM later.
 */
static struct ceph_mds_request *
prepare_open_request(struct super_block *sb, int flags, int create_mode)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int want_auth = USE_ANY_MDS;
        int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;

        if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
                want_auth = USE_AUTH_MDS;

        req = ceph_mdsc_create_request(mdsc, op, want_auth);
        if (IS_ERR(req))
                goto out;
        req->r_fmode = ceph_flags_to_mode(flags);
        req->r_args.open.flags = ceph_flags_sys2wire(flags);
        req->r_args.open.mode = cpu_to_le32(create_mode);
out:
        return req;
}
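
/*
 * Example (editor's illustration): an open with O_CREAT set becomes a
 * CEPH_MDS_OP_CREATE request routed to the auth MDS (USE_AUTH_MDS),
 * while a plain O_RDONLY open becomes CEPH_MDS_OP_OPEN and may be
 * served by any MDS (USE_ANY_MDS).
 */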

static int ceph_init_file_info(struct inode *inode, struct file *file,
                                        int fmode, bool isdir)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_file_info *fi;

        dout("%s %p %p 0%o (%s)\n", __func__, inode, file,
                        inode->i_mode, isdir ? "dir" : "regular");
        BUG_ON(inode->i_fop->release != ceph_release);

        if (isdir) {
                struct ceph_dir_file_info *dfi =
                        kmem_cache_zalloc(ceph_dir_file_cachep, GFP_KERNEL);
                if (!dfi) {
                        ceph_put_fmode(ci, fmode); /* clean up */
                        return -ENOMEM;
                }

                file->private_data = dfi;
                fi = &dfi->file_info;
                dfi->next_offset = 2;
                dfi->readdir_cache_idx = -1;
        } else {
                fi = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL);
                if (!fi) {
                        ceph_put_fmode(ci, fmode); /* clean up */
                        return -ENOMEM;
                }

                file->private_data = fi;
        }

        fi->fmode = fmode;
        spin_lock_init(&fi->rw_contexts_lock);
        INIT_LIST_HEAD(&fi->rw_contexts);
        fi->meta_err = errseq_sample(&ci->i_meta_err);
        fi->filp_gen = READ_ONCE(ceph_inode_to_client(inode)->filp_gen);

        return 0;
}

/*
 * initialize private struct file data.
 * if we fail, clean up by dropping fmode reference on the ceph_inode
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
        int ret = 0;

        switch (inode->i_mode & S_IFMT) {
        case S_IFREG:
                ceph_fscache_register_inode_cookie(inode);
                ceph_fscache_file_set_cookie(inode, file);
                /* fall through */
        case S_IFDIR:
                ret = ceph_init_file_info(inode, file, fmode,
                                                S_ISDIR(inode->i_mode));
                if (ret)
                        return ret;
                break;

        case S_IFLNK:
                dout("init_file %p %p 0%o (symlink)\n", inode, file,
                     inode->i_mode);
                ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
                break;

        default:
                dout("init_file %p %p 0%o (special)\n", inode, file,
                     inode->i_mode);
                /*
                 * we need to drop the open ref now, since we don't
                 * have .release set to ceph_release.
                 */
                ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
                BUG_ON(inode->i_fop->release == ceph_release);

                /* call the proper open fop */
                ret = inode->i_fop->open(inode, file);
        }
        return ret;
}

/*
 * try to renew caps after the session gets killed.
 */
int ceph_renew_caps(struct inode *inode)
{
        struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_mds_request *req;
        int err, flags, wanted;

        spin_lock(&ci->i_ceph_lock);
        wanted = __ceph_caps_file_wanted(ci);
        if (__ceph_is_any_real_caps(ci) &&
            (!(wanted & CEPH_CAP_ANY_WR) || ci->i_auth_cap)) {
                int issued = __ceph_caps_issued(ci, NULL);
                spin_unlock(&ci->i_ceph_lock);
                dout("renew caps %p want %s issued %s updating mds_wanted\n",
                     inode, ceph_cap_string(wanted), ceph_cap_string(issued));
                ceph_check_caps(ci, 0, NULL);
                return 0;
        }
        spin_unlock(&ci->i_ceph_lock);

        flags = 0;
        if ((wanted & CEPH_CAP_FILE_RD) && (wanted & CEPH_CAP_FILE_WR))
                flags = O_RDWR;
        else if (wanted & CEPH_CAP_FILE_RD)
                flags = O_RDONLY;
        else if (wanted & CEPH_CAP_FILE_WR)
                flags = O_WRONLY;
#ifdef O_LAZY
        if (wanted & CEPH_CAP_FILE_LAZYIO)
                flags |= O_LAZY;
#endif

        req = prepare_open_request(inode->i_sb, flags, 0);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out;
        }

        req->r_inode = inode;
        ihold(inode);
        req->r_num_caps = 1;
        req->r_fmode = -1;

        err = ceph_mdsc_do_request(mdsc, NULL, req);
        ceph_mdsc_put_request(req);
out:
        dout("renew caps %p open result=%d\n", inode, err);
        return err < 0 ? err : 0;
}
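
/*
 * Example (editor's illustration): a client that still wanted
 * CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR when its session died re-opens
 * with O_RDWR above; one that wanted only CEPH_CAP_FILE_RD re-opens
 * O_RDONLY.  Only the open result is checked here; the renewed caps
 * themselves arrive with the MDS reply.
 */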

/*
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS).  We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        struct ceph_file_info *fi = file->private_data;
        int err;
        int flags, fmode, wanted;

        if (fi) {
                dout("open file %p is already opened\n", file);
                return 0;
        }

        /* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
        flags = file->f_flags & ~(O_CREAT|O_EXCL);
        if (S_ISDIR(inode->i_mode))
                flags = O_DIRECTORY;  /* mds likes to know */

        dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
             ceph_vinop(inode), file, flags, file->f_flags);
        fmode = ceph_flags_to_mode(flags);
        wanted = ceph_caps_for_mode(fmode);

        /* snapped files are read-only */
        if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
                return -EROFS;

        /* trivially open snapdir */
        if (ceph_snap(inode) == CEPH_SNAPDIR) {
                spin_lock(&ci->i_ceph_lock);
                __ceph_get_fmode(ci, fmode);
                spin_unlock(&ci->i_ceph_lock);
                return ceph_init_file(inode, file, fmode);
        }

        /*
         * No need to block if we have caps on the auth MDS (for
         * write) or any MDS (for read).  Update wanted set
         * asynchronously.
         */
        spin_lock(&ci->i_ceph_lock);
        if (__ceph_is_any_real_caps(ci) &&
            (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
                int mds_wanted = __ceph_caps_mds_wanted(ci, true);
                int issued = __ceph_caps_issued(ci, NULL);

                dout("open %p fmode %d want %s issued %s using existing\n",
                     inode, fmode, ceph_cap_string(wanted),
                     ceph_cap_string(issued));
                __ceph_get_fmode(ci, fmode);
                spin_unlock(&ci->i_ceph_lock);

                /* adjust wanted? */
                if ((issued & wanted) != wanted &&
                    (mds_wanted & wanted) != wanted &&
                    ceph_snap(inode) != CEPH_SNAPDIR)
                        ceph_check_caps(ci, 0, NULL);

                return ceph_init_file(inode, file, fmode);
        } else if (ceph_snap(inode) != CEPH_NOSNAP &&
                   (ci->i_snap_caps & wanted) == wanted) {
                __ceph_get_fmode(ci, fmode);
                spin_unlock(&ci->i_ceph_lock);
                return ceph_init_file(inode, file, fmode);
        }

        spin_unlock(&ci->i_ceph_lock);

        dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
        req = prepare_open_request(inode->i_sb, flags, 0);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out;
        }
        req->r_inode = inode;
        ihold(inode);

        req->r_num_caps = 1;
        err = ceph_mdsc_do_request(mdsc, NULL, req);
        if (!err)
                err = ceph_init_file(inode, file, req->r_fmode);
        ceph_mdsc_put_request(req);
        dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
        return err;
}
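
/*
 * Example of the fast path above (editor's illustration): re-opening a
 * file for read while this client already holds caps from any MDS just
 * takes __ceph_get_fmode() under i_ceph_lock and returns through
 * ceph_init_file() with no MDS round trip.  A write open additionally
 * requires caps from the auth MDS (ci->i_auth_cap); failing either
 * test, a full CEPH_MDS_OP_OPEN request is issued.
 */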


/*
 * Do a lookup + open with a single request.  If we get a non-existent
 * file or symlink, return 1 so the VFS can retry.
 */
int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
                     struct file *file, unsigned flags, umode_t mode)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        struct dentry *dn;
        struct ceph_acl_sec_ctx as_ctx = {};
        int mask;
        int err;

        dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
             dir, dentry, dentry,
             d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);

        if (dentry->d_name.len > NAME_MAX)
                return -ENAMETOOLONG;

        if (flags & O_CREAT) {
                if (ceph_quota_is_max_files_exceeded(dir))
                        return -EDQUOT;
                err = ceph_pre_init_acls(dir, &mode, &as_ctx);
                if (err < 0)
                        return err;
                err = ceph_security_init_secctx(dentry, mode, &as_ctx);
                if (err < 0)
                        goto out_ctx;
        } else if (!d_in_lookup(dentry)) {
                /* If it's not being looked up, it's negative */
                return -ENOENT;
        }

        /* do the open */
        req = prepare_open_request(dir->i_sb, flags, mode);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out_ctx;
        }
        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        if (flags & O_CREAT) {
                req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
                req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
                if (as_ctx.pagelist) {
                        req->r_pagelist = as_ctx.pagelist;
                        as_ctx.pagelist = NULL;
                }
        }

        mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
        if (ceph_security_xattr_wanted(dir))
                mask |= CEPH_CAP_XATTR_SHARED;
        req->r_args.open.mask = cpu_to_le32(mask);

        req->r_parent = dir;
        set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
        err = ceph_mdsc_do_request(mdsc,
                                   (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
                                   req);
        err = ceph_handle_snapdir(req, dentry, err);
        if (err)
                goto out_req;

        if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
                err = ceph_handle_notrace_create(dir, dentry);

        if (d_in_lookup(dentry)) {
                dn = ceph_finish_lookup(req, dentry, err);
                if (IS_ERR(dn))
                        err = PTR_ERR(dn);
        } else {
                /* we were given a hashed negative dentry */
                dn = NULL;
        }
        if (err)
                goto out_req;
        if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
                /* make vfs retry on splice, ENOENT, or symlink */
                dout("atomic_open finish_no_open on dn %p\n", dn);
                err = finish_no_open(file, dn);
        } else {
                dout("atomic_open finish_open on dn %p\n", dn);
                if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
                        ceph_init_inode_acls(d_inode(dentry), &as_ctx);
                        file->f_mode |= FMODE_CREATED;
                }
                err = finish_open(file, dentry, ceph_open);
        }
out_req:
        if (!req->r_err && req->r_target_inode)
                ceph_put_fmode(ceph_inode(req->r_target_inode), req->r_fmode);
        ceph_mdsc_put_request(req);
out_ctx:
        ceph_release_acl_sec_ctx(&as_ctx);
        dout("atomic_open result=%d\n", err);
        return err;
}
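
/*
 * Illustration of the retry contract noted above (editor's gloss): when
 * the MDS reply yields no usable dentry (splice, ENOENT, or a symlink),
 * finish_no_open() returns 1 and the VFS falls back to the normal
 * lookup + open sequence; a successful create instead reaches
 * finish_open() with FMODE_CREATED set on the file.
 */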

int ceph_release(struct inode *inode, struct file *file)
{
        struct ceph_inode_info *ci = ceph_inode(inode);

        if (S_ISDIR(inode->i_mode)) {
                struct ceph_dir_file_info *dfi = file->private_data;
                dout("release inode %p dir file %p\n", inode, file);
                WARN_ON(!list_empty(&dfi->file_info.rw_contexts));

                ceph_put_fmode(ci, dfi->file_info.fmode);

                if (dfi->last_readdir)
                        ceph_mdsc_put_request(dfi->last_readdir);
                kfree(dfi->last_name);
                kfree(dfi->dir_info);
                kmem_cache_free(ceph_dir_file_cachep, dfi);
        } else {
                struct ceph_file_info *fi = file->private_data;
                dout("release inode %p regular file %p\n", inode, file);
                WARN_ON(!list_empty(&fi->rw_contexts));

                ceph_put_fmode(ci, fi->fmode);
                kmem_cache_free(ceph_file_cachep, fi);
        }

        /* wake up anyone waiting for caps on this inode */
        wake_up_all(&ci->i_cap_wq);
        return 0;
}

enum {
        HAVE_RETRIED = 1,
        CHECK_EOF =    2,
        READ_INLINE =  3,
};

/*
 * Completely synchronous read and write methods.  Direct from __user
 * buffer to osd, or directly to user pages (if O_DIRECT).
 *
 * If the read spans an object boundary, just do multiple reads.  (That's
 * not atomic, but good enough for now.)
 *
 * If we get a short result from the OSD, check against i_size; we should
 * only return a short read to the caller if we hit EOF.
 */
static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
                              int *retry_op)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_osd_client *osdc = &fsc->client->osdc;
        ssize_t ret;
        u64 off = iocb->ki_pos;
        u64 len = iov_iter_count(to);

        dout("sync_read on file %p %llu~%u %s\n", file, off, (unsigned)len,
             (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

        if (!len)
                return 0;
        /*
         * flush any page cache pages in this range.  this
         * will make concurrent normal and sync io slow,
         * but it will at least behave sensibly when they are
         * in sequence.
         */
        ret = filemap_write_and_wait_range(inode->i_mapping,
                                           off, off + len - 1);
        if (ret < 0)
                return ret;

        ret = 0;
        while ((len = iov_iter_count(to)) > 0) {
                struct ceph_osd_request *req;
                struct page **pages;
                int num_pages;
                size_t page_off;
                u64 i_size;
                bool more;

                req = ceph_osdc_new_request(osdc, &ci->i_layout,
                                        ci->i_vino, off, &len, 0, 1,
                                        CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
                                        NULL, ci->i_truncate_seq,
                                        ci->i_truncate_size, false);
                if (IS_ERR(req)) {
                        ret = PTR_ERR(req);
                        break;
                }

                more = len < iov_iter_count(to);

                if (unlikely(iov_iter_is_pipe(to))) {
                        ret = iov_iter_get_pages_alloc(to, &pages, len,
                                                       &page_off);
                        if (ret <= 0) {
                                ceph_osdc_put_request(req);
                                ret = -ENOMEM;
                                break;
                        }
                        num_pages = DIV_ROUND_UP(ret + page_off, PAGE_SIZE);
                        if (ret < len) {
                                len = ret;
                                osd_req_op_extent_update(req, 0, len);
                                more = false;
                        }
                } else {
                        num_pages = calc_pages_for(off, len);
                        page_off = off & ~PAGE_MASK;
                        pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
                        if (IS_ERR(pages)) {
                                ceph_osdc_put_request(req);
                                ret = PTR_ERR(pages);
                                break;
                        }
                }

                osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_off,
                                                 false, false);
                ret = ceph_osdc_start_request(osdc, req, false);
                if (!ret)
                        ret = ceph_osdc_wait_request(osdc, req);
                ceph_osdc_put_request(req);

                i_size = i_size_read(inode);
                dout("sync_read %llu~%llu got %zd i_size %llu%s\n",
                     off, len, ret, i_size, (more ? " MORE" : ""));

                if (ret == -ENOENT)
                        ret = 0;
                if (ret >= 0 && ret < len && (off + ret < i_size)) {
                        int zlen = min(len - ret, i_size - off - ret);
                        int zoff = page_off + ret;
                        dout("sync_read zero gap %llu~%llu\n",
                             off + ret, off + ret + zlen);
                        ceph_zero_page_vector_range(zoff, zlen, pages);
                        ret += zlen;
                }

                if (unlikely(iov_iter_is_pipe(to))) {
                        if (ret > 0) {
                                iov_iter_advance(to, ret);
                                off += ret;
                        } else {
                                iov_iter_advance(to, 0);
                        }
                        ceph_put_page_vector(pages, num_pages, false);
                } else {
                        int idx = 0;
                        size_t left = ret > 0 ? ret : 0;
                        while (left > 0) {
                                size_t len, copied;
                                page_off = off & ~PAGE_MASK;
                                len = min_t(size_t, left, PAGE_SIZE - page_off);
                                copied = copy_page_to_iter(pages[idx++],
                                                           page_off, len, to);
                                off += copied;
                                left -= copied;
                                if (copied < len) {
                                        ret = -EFAULT;
                                        break;
                                }
                        }
                        ceph_release_page_vector(pages, num_pages);
                }

                if (ret < 0) {
                        if (ret == -EBLACKLISTED)
                                fsc->blacklisted = true;
                        break;
                }

                if (off >= i_size || !more)
                        break;
        }

        if (off > iocb->ki_pos) {
                if (ret >= 0 &&
                    iov_iter_count(to) > 0 && off >= i_size_read(inode))
                        *retry_op = CHECK_EOF;
                ret = off - iocb->ki_pos;
                iocb->ki_pos = off;
        }

        dout("sync_read result %zd retry_op %d\n", ret, *retry_op);
        return ret;
}
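
/*
 * Worked example for the zero-fill above (illustrative numbers): a 16K
 * read at off=0 of a file whose object holds only 6000 bytes while
 * i_size is 10000 gets ret=6000 back from the OSD.  Since
 * off + ret < i_size, the gap is zeroed via
 * ceph_zero_page_vector_range() with zlen = min(16384 - 6000,
 * 10000 - 0 - 6000) = 4000, so the caller sees 10000 bytes and a short
 * read is reported only at true EOF, as promised above.
 */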

struct ceph_aio_request {
        struct kiocb *iocb;
        size_t total_len;
        bool write;
        bool should_dirty;
        int error;
        struct list_head osd_reqs;
        unsigned num_reqs;
        atomic_t pending_reqs;
        struct timespec64 mtime;
        struct ceph_cap_flush *prealloc_cf;
};

struct ceph_aio_work {
        struct work_struct work;
        struct ceph_osd_request *req;
};

static void ceph_aio_retry_work(struct work_struct *work);

static void ceph_aio_complete(struct inode *inode,
                              struct ceph_aio_request *aio_req)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        int ret;

        if (!atomic_dec_and_test(&aio_req->pending_reqs))
                return;

        ret = aio_req->error;
        if (!ret)
                ret = aio_req->total_len;

        dout("ceph_aio_complete %p rc %d\n", inode, ret);

        if (ret >= 0 && aio_req->write) {
                int dirty;

                loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len;
                if (endoff > i_size_read(inode)) {
                        if (ceph_inode_set_size(inode, endoff))
                                ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
                }

                spin_lock(&ci->i_ceph_lock);
                ci->i_inline_version = CEPH_INLINE_NONE;
                dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
                                               &aio_req->prealloc_cf);
                spin_unlock(&ci->i_ceph_lock);
                if (dirty)
                        __mark_inode_dirty(inode, dirty);

        }

        ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR :
                                                CEPH_CAP_FILE_RD));

        aio_req->iocb->ki_complete(aio_req->iocb, ret, 0);

        ceph_free_cap_flush(aio_req->prealloc_cf);
        kfree(aio_req);
}

static void ceph_aio_complete_req(struct ceph_osd_request *req)
{
        int rc = req->r_result;
        struct inode *inode = req->r_inode;
        struct ceph_aio_request *aio_req = req->r_priv;
        struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);

        BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_BVECS);
        BUG_ON(!osd_data->num_bvecs);

        dout("ceph_aio_complete_req %p rc %d bytes %u\n",
             inode, rc, osd_data->bvec_pos.iter.bi_size);

        if (rc == -EOLDSNAPC) {
                struct ceph_aio_work *aio_work;
                BUG_ON(!aio_req->write);

                aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS);
                if (aio_work) {
                        INIT_WORK(&aio_work->work, ceph_aio_retry_work);
                        aio_work->req = req;
                        queue_work(ceph_inode_to_client(inode)->inode_wq,
                                   &aio_work->work);
                        return;
                }
                rc = -ENOMEM;
        } else if (!aio_req->write) {
                if (rc == -ENOENT)
                        rc = 0;
                if (rc >= 0 && osd_data->bvec_pos.iter.bi_size > rc) {
                        struct iov_iter i;
                        int zlen = osd_data->bvec_pos.iter.bi_size - rc;

                        /*
                         * If the read was satisfied by a single OSD
                         * request, it may extend past EOF; otherwise
                         * the read is confined to i_size.
                         */
                        if (aio_req->num_reqs == 1) {
                                loff_t i_size = i_size_read(inode);
                                loff_t endoff = aio_req->iocb->ki_pos + rc;
                                if (endoff < i_size)
                                        zlen = min_t(size_t, zlen,
                                                     i_size - endoff);
                                aio_req->total_len = rc + zlen;
                        }

                        iov_iter_bvec(&i, READ, osd_data->bvec_pos.bvecs,
                                      osd_data->num_bvecs,
                                      osd_data->bvec_pos.iter.bi_size);
                        iov_iter_advance(&i, rc);
                        iov_iter_zero(zlen, &i);
                }
        }

        put_bvecs(osd_data->bvec_pos.bvecs, osd_data->num_bvecs,
                  aio_req->should_dirty);
        ceph_osdc_put_request(req);

        if (rc < 0)
                cmpxchg(&aio_req->error, 0, rc);

        ceph_aio_complete(inode, aio_req);
        return;
}
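
/*
 * Note on the -EOLDSNAPC branch above (editor's gloss): the OSD rejects
 * a write tagged with an outdated snap context, so the request is
 * handed off to ceph_aio_retry_work() below, which picks up the current
 * snap context under i_ceph_lock, clones the request, and resubmits it
 * from workqueue context, where blocking is allowed.
 */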

static void ceph_aio_retry_work(struct work_struct *work)
{
        struct ceph_aio_work *aio_work =
                container_of(work, struct ceph_aio_work, work);
        struct ceph_osd_request *orig_req = aio_work->req;
        struct ceph_aio_request *aio_req = orig_req->r_priv;
        struct inode *inode = orig_req->r_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_snap_context *snapc;
        struct ceph_osd_request *req;
        int ret;

        spin_lock(&ci->i_ceph_lock);
        if (__ceph_have_pending_cap_snap(ci)) {
                struct ceph_cap_snap *capsnap =
                        list_last_entry(&ci->i_cap_snaps,
                                        struct ceph_cap_snap,
                                        ci_item);
                snapc = ceph_get_snap_context(capsnap->context);
        } else {
                BUG_ON(!ci->i_head_snapc);
                snapc = ceph_get_snap_context(ci->i_head_snapc);
        }
        spin_unlock(&ci->i_ceph_lock);

        req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 1,
                        false, GFP_NOFS);
        if (!req) {
                ret = -ENOMEM;
                req = orig_req;
                goto out;
        }

        req->r_flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
        ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc);
        ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid);

        req->r_ops[0] = orig_req->r_ops[0];

        req->r_mtime = aio_req->mtime;
        req->r_data_offset = req->r_ops[0].extent.offset;

        ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
        if (ret) {
                ceph_osdc_put_request(req);
                req = orig_req;
                goto out;
        }

        ceph_osdc_put_request(orig_req);

        req->r_callback = ceph_aio_complete_req;
        req->r_inode = inode;
        req->r_priv = aio_req;

        ret = ceph_osdc_start_request(req->r_osdc, req, false);
out:
        if (ret < 0) {
                req->r_result = ret;
                ceph_aio_complete_req(req);
        }

        ceph_put_snap_context(snapc);
        kfree(aio_work);
}

static ssize_t
ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
                       struct ceph_snap_context *snapc,
                       struct ceph_cap_flush **pcf)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_vino vino;
        struct ceph_osd_request *req;
        struct bio_vec *bvecs;
        struct ceph_aio_request *aio_req = NULL;
        int num_pages = 0;
        int flags;
        int ret = 0;
        struct timespec64 mtime = current_time(inode);
        size_t count = iov_iter_count(iter);
        loff_t pos = iocb->ki_pos;
        bool write = iov_iter_rw(iter) == WRITE;
        bool should_dirty = !write && iter_is_iovec(iter);

        if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
                return -EROFS;

        dout("sync_direct_%s on file %p %lld~%u snapc %p seq %lld\n",
             (write ? "write" : "read"), file, pos, (unsigned)count,
             snapc, snapc ? snapc->seq : 0);

        if (write) {
                int ret2 = invalidate_inode_pages2_range(inode->i_mapping,
                                        pos >> PAGE_SHIFT,
                                        (pos + count - 1) >> PAGE_SHIFT);
                if (ret2 < 0)
                        dout("invalidate_inode_pages2_range returned %d\n", ret2);

                flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
        } else {
                flags = CEPH_OSD_FLAG_READ;
        }

        while (iov_iter_count(iter) > 0) {
                u64 size = iov_iter_count(iter);
                ssize_t len;

                if (write)
                        size = min_t(u64, size, fsc->mount_options->wsize);
                else
                        size = min_t(u64, size, fsc->mount_options->rsize);

                vino = ceph_vino(inode);
                req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
                                            vino, pos, &size, 0,
                                            1,
                                            write ? CEPH_OSD_OP_WRITE :
                                                    CEPH_OSD_OP_READ,
                                            flags, snapc,
                                            ci->i_truncate_seq,
                                            ci->i_truncate_size,
                                            false);
                if (IS_ERR(req)) {
                        ret = PTR_ERR(req);
                        break;
                }

                len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages);
                if (len < 0) {
                        ceph_osdc_put_request(req);
                        ret = len;
                        break;
                }
                if (len != size)
                        osd_req_op_extent_update(req, 0, len);

                /*
                 * To simplify error handling, allow AIO only when the
                 * IO is within i_size or can be satisfied by a single
                 * OSD request.
                 */
                if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) &&
                    (len == count || pos + count <= i_size_read(inode))) {
                        aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL);
                        if (aio_req) {
                                aio_req->iocb = iocb;
                                aio_req->write = write;
                                aio_req->should_dirty = should_dirty;
                                INIT_LIST_HEAD(&aio_req->osd_reqs);
                                if (write) {
                                        aio_req->mtime = mtime;
                                        swap(aio_req->prealloc_cf, *pcf);
                                }
                        }
                        /* ignore error */
                }

                if (write) {
                        /*
                         * throw out any page cache pages in this range. this
                         * may block.
                         */
                        truncate_inode_pages_range(inode->i_mapping, pos,
                                                   PAGE_ALIGN(pos + len) - 1);

                        req->r_mtime = mtime;
                }

                osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);

                if (aio_req) {
                        aio_req->total_len += len;
                        aio_req->num_reqs++;
                        atomic_inc(&aio_req->pending_reqs);

                        req->r_callback = ceph_aio_complete_req;
                        req->r_inode = inode;
                        req->r_priv = aio_req;
                        list_add_tail(&req->r_private_item, &aio_req->osd_reqs);

                        pos += len;
                        continue;
                }

                ret = ceph_osdc_start_request(req->r_osdc, req, false);
                if (!ret)
                        ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

                size = i_size_read(inode);
                if (!write) {
                        if (ret == -ENOENT)
                                ret = 0;
                        if (ret >= 0 && ret < len && pos + ret < size) {
                                struct iov_iter i;
                                int zlen = min_t(size_t, len - ret,
                                                 size - pos - ret);

                                iov_iter_bvec(&i, READ, bvecs, num_pages, len);
                                iov_iter_advance(&i, ret);
                                iov_iter_zero(zlen, &i);
                                ret += zlen;
                        }
                        if (ret >= 0)
                                len = ret;
                }

                put_bvecs(bvecs, num_pages, should_dirty);
                ceph_osdc_put_request(req);
                if (ret < 0)
                        break;

                pos += len;
                if (!write && pos >= size)
                        break;

                if (write && pos > size) {
                        if (ceph_inode_set_size(inode, pos))
                                ceph_check_caps(ceph_inode(inode),
                                                CHECK_CAPS_AUTHONLY,
                                                NULL);
                }
        }

        if (aio_req) {
                LIST_HEAD(osd_reqs);

                if (aio_req->num_reqs == 0) {
                        kfree(aio_req);
                        return ret;
                }

                ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR :
                                              CEPH_CAP_FILE_RD);

                list_splice(&aio_req->osd_reqs, &osd_reqs);
                while (!list_empty(&osd_reqs)) {
                        req = list_first_entry(&osd_reqs,
                                               struct ceph_osd_request,
                                               r_private_item);
                        list_del_init(&req->r_private_item);
                        if (ret >= 0)
                                ret = ceph_osdc_start_request(req->r_osdc,
                                                              req, false);
                        if (ret < 0) {
                                req->r_result = ret;
                                ceph_aio_complete_req(req);
                        }
                }
                return -EIOCBQUEUED;
        }

        if (ret != -EOLDSNAPC && pos > iocb->ki_pos) {
                ret = pos - iocb->ki_pos;
                iocb->ki_pos = pos;
        }
        return ret;
}
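
/*
 * Illustration (editor's note): an async O_DIRECT write spanning
 * several chunks (bounded by wsize and the striping layout) queues one
 * OSD request per chunk on aio_req->osd_reqs; each bumps pending_reqs,
 * the function returns -EIOCBQUEUED, and iocb->ki_complete() fires
 * exactly once from ceph_aio_complete() when the last request drops
 * pending_reqs to zero.
 */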

/*
 * Synchronous write, straight from __user pointer or user pages.
 *
 * If the write spans an object boundary, just do multiple writes.  (For
 * a correct atomic write, we should e.g. take write locks on all
 * objects, rollback on failure, etc.)
 */
static ssize_t
ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
                struct ceph_snap_context *snapc)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_vino vino;
        struct ceph_osd_request *req;
        struct page **pages;
        u64 len;
        int num_pages;
        int written = 0;
        int flags;
        int ret;
        bool check_caps = false;
        struct timespec64 mtime = current_time(inode);
        size_t count = iov_iter_count(from);

        if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
                return -EROFS;

        dout("sync_write on file %p %lld~%u snapc %p seq %lld\n",
             file, pos, (unsigned)count, snapc, snapc->seq);

        ret = filemap_write_and_wait_range(inode->i_mapping,
                                           pos, pos + count - 1);
        if (ret < 0)
                return ret;

        ret = invalidate_inode_pages2_range(inode->i_mapping,
                                            pos >> PAGE_SHIFT,
                                            (pos + count - 1) >> PAGE_SHIFT);
        if (ret < 0)
                dout("invalidate_inode_pages2_range returned %d\n", ret);

        flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;

        while ((len = iov_iter_count(from)) > 0) {
                size_t left;
                int n;

                vino = ceph_vino(inode);
                req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
                                            vino, pos, &len, 0, 1,
                                            CEPH_OSD_OP_WRITE, flags, snapc,
                                            ci->i_truncate_seq,
                                            ci->i_truncate_size,
                                            false);
                if (IS_ERR(req)) {
                        ret = PTR_ERR(req);
                        break;
                }

                /*
                 * write from beginning of first page,
                 * regardless of io alignment
                 */
                num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;

                pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
                if (IS_ERR(pages)) {
                        ret = PTR_ERR(pages);
                        goto out;
                }

                left = len;
                for (n = 0; n < num_pages; n++) {
                        size_t plen = min_t(size_t, left, PAGE_SIZE);
                        ret = copy_page_from_iter(pages[n], 0, plen, from);
                        if (ret != plen) {
                                ret = -EFAULT;
                                break;
                        }
                        left -= ret;
                }

                if (ret < 0) {
                        ceph_release_page_vector(pages, num_pages);
                        goto out;
                }

                req->r_inode = inode;

                osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
                                                false, true);

                req->r_mtime = mtime;
                ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
                if (!ret)
                        ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

out:
                ceph_osdc_put_request(req);
                if (ret != 0) {
                        ceph_set_error_write(ci);
                        break;
                }

                ceph_clear_error_write(ci);
                pos += len;
                written += len;
                if (pos > i_size_read(inode)) {
                        check_caps = ceph_inode_set_size(inode, pos);
                        if (check_caps)
                                ceph_check_caps(ceph_inode(inode),
                                                CHECK_CAPS_AUTHONLY,
                                                NULL);
                }

        }

        if (ret != -EOLDSNAPC && written > 0) {
                ret = written;
                iocb->ki_pos = pos;
        }
        return ret;
}
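
/*
 * Worked example (illustrative): pos = 100, len = 5000 allocates
 * num_pages = (5000 + 4095) >> PAGE_SHIFT = 2 pages (with 4K pages) and
 * copies the data in from page offset 0, matching the "write from
 * beginning of first page, regardless of io alignment" note above; the
 * OSD extent itself still targets offset 100 within the object.
 */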

/*
 * Wrap generic_file_read_iter with checks for cap bits on the inode.
 * Atomically grab references, so that those bits are not released
 * back to the MDS mid-read.
 *
 * Hmm, the sync read case isn't actually async... should it be?
 */
static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        struct file *filp = iocb->ki_filp;
        struct ceph_file_info *fi = filp->private_data;
        size_t len = iov_iter_count(to);
        struct inode *inode = file_inode(filp);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct page *pinned_page = NULL;
        ssize_t ret;
        int want, got = 0;
        int retry_op = 0, read = 0;

again:
        dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
             inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);

        if (fi->fmode & CEPH_FILE_MODE_LAZY)
                want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
        else
                want = CEPH_CAP_FILE_CACHE;
        ret = ceph_get_caps(filp, CEPH_CAP_FILE_RD, want, -1,
                            &got, &pinned_page);
        if (ret < 0)
                return ret;

        if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
            (iocb->ki_flags & IOCB_DIRECT) ||
            (fi->flags & CEPH_F_SYNC)) {

                dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
                     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
                     ceph_cap_string(got));

                if (ci->i_inline_version == CEPH_INLINE_NONE) {
                        if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
                                ceph_start_io_direct(inode);
                                ret = ceph_direct_read_write(iocb, to,
                                                             NULL, NULL);
                                ceph_end_io_direct(inode);
                                if (ret >= 0 && ret < len)
                                        retry_op = CHECK_EOF;
                        } else {
                                ceph_start_io_read(inode);
                                ret = ceph_sync_read(iocb, to, &retry_op);
                                ceph_end_io_read(inode);
                        }
                } else {
                        retry_op = READ_INLINE;
                }
        } else {
                CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
                dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
                     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
                     ceph_cap_string(got));
                ceph_add_rw_context(fi, &rw_ctx);
                ceph_start_io_read(inode);
                ret = generic_file_read_iter(iocb, to);
                ceph_end_io_read(inode);
                ceph_del_rw_context(fi, &rw_ctx);
        }
        dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
             inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
        if (pinned_page) {
                put_page(pinned_page);
                pinned_page = NULL;
        }
        ceph_put_cap_refs(ci, got);
        if (retry_op > HAVE_RETRIED && ret >= 0) {
                int statret;
                struct page *page = NULL;
                loff_t i_size;
                if (retry_op == READ_INLINE) {
                        page = __page_cache_alloc(GFP_KERNEL);
                        if (!page)
                                return -ENOMEM;
                }

                statret = __ceph_do_getattr(inode, page,
                                            CEPH_STAT_CAP_INLINE_DATA, !!page);
                if (statret < 0) {
                        if (page)
                                __free_page(page);
                        if (statret == -ENODATA) {
                                BUG_ON(retry_op != READ_INLINE);
                                goto again;
                        }
                        return statret;
                }

                i_size = i_size_read(inode);
                if (retry_op == READ_INLINE) {
                        BUG_ON(ret > 0 || read > 0);
                        if (iocb->ki_pos < i_size &&
                            iocb->ki_pos < PAGE_SIZE) {
                                loff_t end = min_t(loff_t, i_size,
                                                   iocb->ki_pos + len);
                                end = min_t(loff_t, end, PAGE_SIZE);
                                if (statret < end)
                                        zero_user_segment(page, statret, end);
                                ret = copy_page_to_iter(page,
                                                iocb->ki_pos & ~PAGE_MASK,
                                                end - iocb->ki_pos, to);
                                iocb->ki_pos += ret;
                                read += ret;
                        }
                        if (iocb->ki_pos < i_size && read < len) {
                                size_t zlen = min_t(size_t, len - read,
                                                    i_size - iocb->ki_pos);
                                ret = iov_iter_zero(zlen, to);
                                iocb->ki_pos += ret;
                                read += ret;
                        }
                        __free_pages(page, 0);
                        return read;
                }

                /* hit EOF or hole? */
                if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
                    ret < len) {
                        dout("sync_read hit hole, ppos %lld < size %lld"
                             ", reading more\n", iocb->ki_pos, i_size);

                        read += ret;
                        len -= ret;
                        retry_op = HAVE_RETRIED;
                        goto again;
                }
        }

        if (ret >= 0)
                ret += read;

        return ret;
}
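
/*
 * Inline-data example (editor's illustration): while the MDS still
 * holds the file data inline, the first pass sets retry_op =
 * READ_INLINE; __ceph_do_getattr() then fetches up to one page of
 * inline data, which is copied (zero-padded up to i_size within that
 * page) into the user buffer, so reads of small inline files never
 * touch the OSDs.
 */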
1385
1386 /*
1387  * Take cap references to avoid releasing caps to MDS mid-write.
1388  *
1389  * If we are synchronous, and write with an old snap context, the OSD
1390  * may return EOLDSNAPC.  In that case, retry the write.. _after_
1391  * dropping our cap refs and allowing the pending snap to logically
1392  * complete _before_ this write occurs.
1393  *
1394  * If we are near ENOSPC, write synchronously.
1395  */
1396 static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
1397 {
1398         struct file *file = iocb->ki_filp;
1399         struct ceph_file_info *fi = file->private_data;
1400         struct inode *inode = file_inode(file);
1401         struct ceph_inode_info *ci = ceph_inode(inode);
1402         struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1403         struct ceph_cap_flush *prealloc_cf;
1404         ssize_t count, written = 0;
1405         int err, want, got;
1406         loff_t pos;
1407         loff_t limit = max(i_size_read(inode), fsc->max_file_size);
1408
1409         if (ceph_snap(inode) != CEPH_NOSNAP)
1410                 return -EROFS;
1411
1412         prealloc_cf = ceph_alloc_cap_flush();
1413         if (!prealloc_cf)
1414                 return -ENOMEM;
1415
1416 retry_snap:
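        /*
         * ceph_start_io_direct()/ceph_start_io_write() provide exclusion
         * between O_DIRECT and buffered writers, so the two paths below
         * never run concurrently against the same inode.
         */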
1417         if (iocb->ki_flags & IOCB_DIRECT)
1418                 ceph_start_io_direct(inode);
1419         else
1420                 ceph_start_io_write(inode);
1421
1422         /* We can write back this queue in page reclaim */
1423         current->backing_dev_info = inode_to_bdi(inode);
1424
1425         if (iocb->ki_flags & IOCB_APPEND) {
1426                 err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
1427                 if (err < 0)
1428                         goto out;
1429         }
1430
1431         err = generic_write_checks(iocb, from);
1432         if (err <= 0)
1433                 goto out;
1434
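        /*
         * 'limit' is the larger of the current size and the configured
         * max_file_size: bytes that already exist past max_file_size stay
         * overwritable, but the write is truncated so the file can never
         * grow beyond the limit.
         */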
1435         pos = iocb->ki_pos;
1436         if (unlikely(pos >= limit)) {
1437                 err = -EFBIG;
1438                 goto out;
1439         } else {
1440                 iov_iter_truncate(from, limit - pos);
1441         }
1442
1443         count = iov_iter_count(from);
1444         if (ceph_quota_is_max_bytes_exceeded(inode, pos + count)) {
1445                 err = -EDQUOT;
1446                 goto out;
1447         }
1448
1449         err = file_remove_privs(file);
1450         if (err)
1451                 goto out;
1452
1453         err = file_update_time(file);
1454         if (err)
1455                 goto out;
1456
1457         inode_inc_iversion_raw(inode);
1458
1459         if (ci->i_inline_version != CEPH_INLINE_NONE) {
1460                 err = ceph_uninline_data(file, NULL);
1461                 if (err < 0)
1462                         goto out;
1463         }
1464
1465         /* FIXME: not complete since it doesn't account for being at quota */
1466         if (ceph_osdmap_flag(&fsc->client->osdc, CEPH_OSDMAP_FULL)) {
1467                 err = -ENOSPC;
1468                 goto out;
1469         }
1470
1471         dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
1472              inode, ceph_vinop(inode), pos, count, i_size_read(inode));
1473         if (fi->fmode & CEPH_FILE_MODE_LAZY)
1474                 want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
1475         else
1476                 want = CEPH_CAP_FILE_BUFFER;
1477         got = 0;
1478         err = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, pos + count,
1479                             &got, NULL);
1480         if (err < 0)
1481                 goto out;
1482
1483         dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
1484              inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));
1485
1486         if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
1487             (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC) ||
1488             (ci->i_ceph_flags & CEPH_I_ERROR_WRITE)) {
1489                 struct ceph_snap_context *snapc;
1490                 struct iov_iter data;
1491
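                /*
                 * Pick the snap context to tag this write with: the most
                 * recent pending cap snap if one exists, otherwise the
                 * live i_head_snapc.  The OSDs use it to order the write
                 * relative to snapshots.
                 */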
1492                 spin_lock(&ci->i_ceph_lock);
1493                 if (__ceph_have_pending_cap_snap(ci)) {
1494                         struct ceph_cap_snap *capsnap =
1495                                         list_last_entry(&ci->i_cap_snaps,
1496                                                         struct ceph_cap_snap,
1497                                                         ci_item);
1498                         snapc = ceph_get_snap_context(capsnap->context);
1499                 } else {
1500                         BUG_ON(!ci->i_head_snapc);
1501                         snapc = ceph_get_snap_context(ci->i_head_snapc);
1502                 }
1503                 spin_unlock(&ci->i_ceph_lock);
1504
1505                 /* we might need to revert back to that point */
1506                 data = *from;
1507                 if (iocb->ki_flags & IOCB_DIRECT) {
1508                         written = ceph_direct_read_write(iocb, &data, snapc,
1509                                                          &prealloc_cf);
1510                         ceph_end_io_direct(inode);
1511                 } else {
1512                         written = ceph_sync_write(iocb, &data, pos, snapc);
1513                         ceph_end_io_write(inode);
1514                 }
1515                 if (written > 0)
1516                         iov_iter_advance(from, written);
1517                 ceph_put_snap_context(snapc);
1518         } else {
1519                 /*
1520                  * No need to acquire the i_truncate_mutex: the MDS
1521                  * revokes Fwb caps before sending a truncate message
1522                  * to us, and we cannot hold the Fwb cap while a
1523                  * vmtruncate is pending.  So write and vmtruncate
1524                  * cannot run at the same time.
1525                  */
1526                 written = generic_perform_write(file, from, pos);
1527                 if (likely(written >= 0))
1528                         iocb->ki_pos = pos + written;
1529                 ceph_end_io_write(inode);
1530         }
1531
1532         if (written >= 0) {
1533                 int dirty;
1534
1535                 spin_lock(&ci->i_ceph_lock);
1536                 ci->i_inline_version = CEPH_INLINE_NONE;
1537                 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
1538                                                &prealloc_cf);
1539                 spin_unlock(&ci->i_ceph_lock);
1540                 if (dirty)
1541                         __mark_inode_dirty(inode, dirty);
1542                 if (ceph_quota_is_max_bytes_approaching(inode, iocb->ki_pos))
1543                         ceph_check_caps(ci, CHECK_CAPS_NODELAY, NULL);
1544         }
1545
1546         dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
1547              inode, ceph_vinop(inode), pos, (unsigned)count,
1548              ceph_cap_string(got));
1549         ceph_put_cap_refs(ci, got);
1550
1551         if (written == -EOLDSNAPC) {
1552                 dout("aio_write %p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n",
1553                      inode, ceph_vinop(inode), pos, (unsigned)count);
1554                 goto retry_snap;
1555         }
1556
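        /*
         * If the cluster is approaching full, behave as though O_DSYNC
         * was given so this write is flushed out immediately instead of
         * sitting dirty in the page cache while free space runs out.
         */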
1557         if (written >= 0) {
1558                 if (ceph_osdmap_flag(&fsc->client->osdc, CEPH_OSDMAP_NEARFULL))
1559                         iocb->ki_flags |= IOCB_DSYNC;
1560                 written = generic_write_sync(iocb, written);
1561         }
1562
1563         goto out_unlocked;
1564 out:
1565         if (iocb->ki_flags & IOCB_DIRECT)
1566                 ceph_end_io_direct(inode);
1567         else
1568                 ceph_end_io_write(inode);
1569 out_unlocked:
1570         ceph_free_cap_flush(prealloc_cf);
1571         current->backing_dev_info = NULL;
1572         return written ? written : err;
1573 }
1574
1575 /*
1576  * llseek.  be sure to verify file size on SEEK_END.
1577  */
1578 static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
1579 {
1580         struct inode *inode = file->f_mapping->host;
1581         struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1582         loff_t i_size;
1583         loff_t ret;
1584
1585         inode_lock(inode);
1586
1587         if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
1588                 ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
1589                 if (ret < 0)
1590                         goto out;
1591         }
1592
1593         i_size = i_size_read(inode);
1594         switch (whence) {
1595         case SEEK_END:
1596                 offset += i_size;
1597                 break;
1598         case SEEK_CUR:
1599                 /*
1600                  * Here we special-case the lseek(fd, 0, SEEK_CUR)
1601                  * position-querying operation.  Avoid rewriting the "same"
1602                  * f_pos value back to the file because a concurrent read(),
1603                  * write() or lseek() might have altered it.
1604                  */
1605                 if (offset == 0) {
1606                         ret = file->f_pos;
1607                         goto out;
1608                 }
1609                 offset += file->f_pos;
1610                 break;
1611         case SEEK_DATA:
1612                 if (offset < 0 || offset >= i_size) {
1613                         ret = -ENXIO;
1614                         goto out;
1615                 }
1616                 break;
1617         case SEEK_HOLE:
1618                 if (offset < 0 || offset >= i_size) {
1619                         ret = -ENXIO;
1620                         goto out;
1621                 }
1622                 offset = i_size;
1623                 break;
1624         }
1625
1626         ret = vfs_setpos(file, offset, max(i_size, fsc->max_file_size));
1627
1628 out:
1629         inode_unlock(inode);
1630         return ret;
1631 }
1632
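/*
 * Note on the SEEK_DATA/SEEK_HOLE cases above: CephFS does not track
 * holes, so every in-file offset is reported as data and the only hole
 * is the implicit one at EOF.  Illustrative userspace behaviour (not
 * part of this file):
 *
 *     lseek(fd, 4096, SEEK_DATA);  // returns 4096 if 4096 < i_size
 *     lseek(fd, 0, SEEK_HOLE);     // returns i_size (or ENXIO if empty)
 */
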
1633 static inline void ceph_zero_partial_page(
1634         struct inode *inode, loff_t offset, unsigned size)
1635 {
1636         struct page *page;
1637         pgoff_t index = offset >> PAGE_SHIFT;
1638
1639         page = find_lock_page(inode->i_mapping, index);
1640         if (page) {
1641                 wait_on_page_writeback(page);
1642                 zero_user(page, offset & (PAGE_SIZE - 1), size);
1643                 unlock_page(page);
1644                 put_page(page);
1645         }
1646 }
1647
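/*
 * Zero a range of the page cache in three steps: zero the partial page
 * at the head in place, drop the whole pages in the middle entirely,
 * then zero the partial page at the tail.
 */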
1648 static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
1649                                       loff_t length)
1650 {
1651         loff_t nearly = round_up(offset, PAGE_SIZE);
1652         if (offset < nearly) {
1653                 loff_t size = nearly - offset;
1654                 if (length < size)
1655                         size = length;
1656                 ceph_zero_partial_page(inode, offset, size);
1657                 offset += size;
1658                 length -= size;
1659         }
1660         if (length >= PAGE_SIZE) {
1661                 loff_t size = round_down(length, PAGE_SIZE);
1662                 truncate_pagecache_range(inode, offset, offset + size - 1);
1663                 offset += size;
1664                 length -= size;
1665         }
1666         if (length)
1667                 ceph_zero_partial_page(inode, offset, length);
1668 }
1669
1670 static int ceph_zero_partial_object(struct inode *inode,
1671                                     loff_t offset, loff_t *length)
1672 {
1673         struct ceph_inode_info *ci = ceph_inode(inode);
1674         struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1675         struct ceph_osd_request *req;
1676         int ret = 0;
1677         loff_t zero = 0;
1678         int op;
1679
1680         if (!length) {
1681                 op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
1682                 length = &zero;
1683         } else {
1684                 op = CEPH_OSD_OP_ZERO;
1685         }
1686
1687         req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
1688                                         ceph_vino(inode),
1689                                         offset, length,
1690                                         0, 1, op,
1691                                         CEPH_OSD_FLAG_WRITE,
1692                                         NULL, 0, 0, false);
1693         if (IS_ERR(req)) {
1694                 ret = PTR_ERR(req);
1695                 goto out;
1696         }
1697
1698         req->r_mtime = inode->i_mtime;
1699         ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
1700         if (!ret) {
1701                 ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
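                /*
                 * A nonexistent object reads back as zeros, so hitting
                 * ENOENT here still leaves the range reading as zeros.
                 */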
1702                 if (ret == -ENOENT)
1703                         ret = 0;
1704         }
1705         ceph_osdc_put_request(req);
1706
1707 out:
1708         return ret;
1709 }
1710
1711 static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
1712 {
1713         int ret = 0;
1714         struct ceph_inode_info *ci = ceph_inode(inode);
1715         s32 stripe_unit = ci->i_layout.stripe_unit;
1716         s32 stripe_count = ci->i_layout.stripe_count;
1717         s32 object_size = ci->i_layout.object_size;
1718         u64 object_set_size = (u64) object_size * stripe_count;
1719         u64 nearly, t;
1720
1721         /* round offset up to next period boundary */
1722         nearly = offset + object_set_size - 1;
1723         t = nearly;
1724         nearly -= do_div(t, object_set_size);
1725
1726         while (length && offset < nearly) {
1727                 loff_t size = length;
1728                 ret = ceph_zero_partial_object(inode, offset, &size);
1729                 if (ret < 0)
1730                         return ret;
1731                 offset += size;
1732                 length -= size;
1733         }
1734         while (length >= object_set_size) {
1735                 int i;
1736                 loff_t pos = offset;
1737                 for (i = 0; i < stripe_count; ++i) {
1738                         ret = ceph_zero_partial_object(inode, pos, NULL);
1739                         if (ret < 0)
1740                                 return ret;
1741                         pos += stripe_unit;
1742                 }
1743                 offset += object_set_size;
1744                 length -= object_set_size;
1745         }
1746         while (length) {
1747                 loff_t size = length;
1748                 ret = ceph_zero_partial_object(inode, offset, &size);
1749                 if (ret < 0)
1750                         return ret;
1751                 offset += size;
1752                 length -= size;
1753         }
1754         return ret;
1755 }
1756
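/*
 * Illustrative decomposition (assuming ceph_zero_partial_object() clamps
 * the length it is given to the containing object): with the common
 * layout of object_size = stripe_unit = 4M and stripe_count = 1, zeroing
 * 8M starting at offset 2M becomes a partial zero of [2M, 4M), one
 * whole-object delete/truncate covering [4M, 8M), and a final partial
 * zero of [8M, 10M).
 */
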
1757 static long ceph_fallocate(struct file *file, int mode,
1758                                 loff_t offset, loff_t length)
1759 {
1760         struct ceph_file_info *fi = file->private_data;
1761         struct inode *inode = file_inode(file);
1762         struct ceph_inode_info *ci = ceph_inode(inode);
1763         struct ceph_cap_flush *prealloc_cf;
1764         int want, got = 0;
1765         int dirty;
1766         int ret = 0;
1767         loff_t endoff = 0;
1768         loff_t size;
1769
1770         if (mode != (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
1771                 return -EOPNOTSUPP;
1772
1773         if (!S_ISREG(inode->i_mode))
1774                 return -EOPNOTSUPP;
1775
1776         prealloc_cf = ceph_alloc_cap_flush();
1777         if (!prealloc_cf)
1778                 return -ENOMEM;
1779
1780         inode_lock(inode);
1781
1782         if (ceph_snap(inode) != CEPH_NOSNAP) {
1783                 ret = -EROFS;
1784                 goto unlock;
1785         }
1786
1787         if (ci->i_inline_version != CEPH_INLINE_NONE) {
1788                 ret = ceph_uninline_data(file, NULL);
1789                 if (ret < 0)
1790                         goto unlock;
1791         }
1792
1793         size = i_size_read(inode);
1794
1795         /* Are we punching a hole beyond EOF? */
1796         if (offset >= size)
1797                 goto unlock;
1798         if ((offset + length) > size)
1799                 length = size - offset;
1800
1801         if (fi->fmode & CEPH_FILE_MODE_LAZY)
1802                 want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
1803         else
1804                 want = CEPH_CAP_FILE_BUFFER;
1805
1806         ret = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, endoff, &got, NULL);
1807         if (ret < 0)
1808                 goto unlock;
1809
1810         ceph_zero_pagecache_range(inode, offset, length);
1811         ret = ceph_zero_objects(inode, offset, length);
1812
1813         if (!ret) {
1814                 spin_lock(&ci->i_ceph_lock);
1815                 ci->i_inline_version = CEPH_INLINE_NONE;
1816                 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
1817                                                &prealloc_cf);
1818                 spin_unlock(&ci->i_ceph_lock);
1819                 if (dirty)
1820                         __mark_inode_dirty(inode, dirty);
1821         }
1822
1823         ceph_put_cap_refs(ci, got);
1824 unlock:
1825         inode_unlock(inode);
1826         ceph_free_cap_flush(prealloc_cf);
1827         return ret;
1828 }
1829
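/*
 * Illustrative only: the sole fallocate mode CephFS accepts is a hole
 * punch that preserves the file size, i.e. from userspace:
 *
 *     fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, off, len);
 *
 * Preallocation (mode 0) and all other modes fail with EOPNOTSUPP.
 */
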
1830 /*
1831  * This function tries to get FILE_WR capabilities for dst_ci and FILE_RD for
1832  * src_ci.  Two attempts are made to obtain both caps, and an error is
1833  * returned if this fails; zero is returned on success.
1834  */
1835 static int get_rd_wr_caps(struct file *src_filp, int *src_got,
1836                           struct file *dst_filp,
1837                           loff_t dst_endoff, int *dst_got)
1838 {
1839         int ret = 0;
1840         bool retrying = false;
1841
1842 retry_caps:
1843         ret = ceph_get_caps(dst_filp, CEPH_CAP_FILE_WR, CEPH_CAP_FILE_BUFFER,
1844                             dst_endoff, dst_got, NULL);
1845         if (ret < 0)
1846                 return ret;
1847
1848         /*
1849          * Since we're already holding the FILE_WR capability for the dst file,
1850          * we would risk a deadlock by using ceph_get_caps.  Thus, we'll do some
1851          * retry dance instead to try to get both capabilities.
1852          */
1853         ret = ceph_try_get_caps(file_inode(src_filp),
1854                                 CEPH_CAP_FILE_RD, CEPH_CAP_FILE_SHARED,
1855                                 false, src_got);
1856         if (ret <= 0) {
1857                 /* Start by dropping dst_ci caps and getting src_ci caps */
1858                 ceph_put_cap_refs(ceph_inode(file_inode(dst_filp)), *dst_got);
1859                 if (retrying) {
1860                         if (!ret)
1861                                 /* ceph_try_get_caps masks EAGAIN */
1862                                 ret = -EAGAIN;
1863                         return ret;
1864                 }
1865                 ret = ceph_get_caps(src_filp, CEPH_CAP_FILE_RD,
1866                                     CEPH_CAP_FILE_SHARED, -1, src_got, NULL);
1867                 if (ret < 0)
1868                         return ret;
1869                 /* ... drop src_ci caps too, and retry */
1870                 ceph_put_cap_refs(ceph_inode(file_inode(src_filp)), *src_got);
1871                 retrying = true;
1872                 goto retry_caps;
1873         }
1874         return ret;
1875 }
1876
1877 static void put_rd_wr_caps(struct ceph_inode_info *src_ci, int src_got,
1878                            struct ceph_inode_info *dst_ci, int dst_got)
1879 {
1880         ceph_put_cap_refs(src_ci, src_got);
1881         ceph_put_cap_refs(dst_ci, dst_got);
1882 }
1883
1884 /*
1885  * This function does several size-related checks, returning an error if:
1886  *  - source file is smaller than off+len
1887  *  - destination file size is not OK (inode_newsize_ok())
1888  *  - max bytes quota is exceeded
1889  */
1890 static int is_file_size_ok(struct inode *src_inode, struct inode *dst_inode,
1891                            loff_t src_off, loff_t dst_off, size_t len)
1892 {
1893         loff_t size, endoff;
1894
1895         size = i_size_read(src_inode);
1896         /*
1897          * Don't copy beyond source file EOF.  Instead of simply setting length
1898          * to (size - src_off), just drop to VFS default implementation, as the
1899          * local i_size may be stale due to other clients writing to the source
1900          * inode.
1901          */
1902         if (src_off + len > size) {
1903                 dout("Copy beyond EOF (%llu + %zu > %llu)\n",
1904                      src_off, len, size);
1905                 return -EOPNOTSUPP;
1906         }
1907         size = i_size_read(dst_inode);
1908
1909         endoff = dst_off + len;
1910         if (inode_newsize_ok(dst_inode, endoff))
1911                 return -EOPNOTSUPP;
1912
1913         if (ceph_quota_is_max_bytes_exceeded(dst_inode, endoff))
1914                 return -EDQUOT;
1915
1916         return 0;
1917 }
1918
1919 static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
1920                                       struct file *dst_file, loff_t dst_off,
1921                                       size_t len, unsigned int flags)
1922 {
1923         struct inode *src_inode = file_inode(src_file);
1924         struct inode *dst_inode = file_inode(dst_file);
1925         struct ceph_inode_info *src_ci = ceph_inode(src_inode);
1926         struct ceph_inode_info *dst_ci = ceph_inode(dst_inode);
1927         struct ceph_cap_flush *prealloc_cf;
1928         struct ceph_fs_client *src_fsc = ceph_inode_to_client(src_inode);
1929         struct ceph_object_locator src_oloc, dst_oloc;
1930         struct ceph_object_id src_oid, dst_oid;
1931         loff_t endoff = 0, size;
1932         ssize_t ret = -EIO;
1933         u64 src_objnum, dst_objnum, src_objoff, dst_objoff;
1934         u32 src_objlen, dst_objlen, object_size;
1935         int src_got = 0, dst_got = 0, err, dirty;
1936         bool do_final_copy = false;
1937
1938         if (src_inode->i_sb != dst_inode->i_sb) {
1939                 struct ceph_fs_client *dst_fsc = ceph_inode_to_client(dst_inode);
1940
1941                 if (ceph_fsid_compare(&src_fsc->client->fsid,
1942                                       &dst_fsc->client->fsid)) {
1943                         dout("Copying files across clusters: src: %pU dst: %pU\n",
1944                              &src_fsc->client->fsid, &dst_fsc->client->fsid);
1945                         return -EXDEV;
1946                 }
1947         }
1948         if (ceph_snap(dst_inode) != CEPH_NOSNAP)
1949                 return -EROFS;
1950
1951         /*
1952          * Some of the checks below will return -EOPNOTSUPP, which will force a
1953          * fallback to the default VFS copy_file_range implementation.  This is
1954  * desirable in several cases (for example, when 'len' is smaller
1955  * than the object size, or whenever the fallback would be more
1956  * efficient).
1957          */
1958
1959         if (ceph_test_mount_opt(src_fsc, NOCOPYFROM))
1960                 return -EOPNOTSUPP;
1961
1962         /*
1963          * Striped file layouts require that we copy partial objects, but the
1964          * OSD copy-from operation only supports full-object copies.  Limit
1965          * this to non-striped file layouts for now.
1966          */
1967         if ((src_ci->i_layout.stripe_unit != dst_ci->i_layout.stripe_unit) ||
1968             (src_ci->i_layout.stripe_count != 1) ||
1969             (dst_ci->i_layout.stripe_count != 1) ||
1970             (src_ci->i_layout.object_size != dst_ci->i_layout.object_size)) {
1971                 dout("Invalid src/dst files layout\n");
1972                 return -EOPNOTSUPP;
1973         }
1974
1975         if (len < src_ci->i_layout.object_size)
1976                 return -EOPNOTSUPP; /* no remote copy will be done */
1977
1978         prealloc_cf = ceph_alloc_cap_flush();
1979         if (!prealloc_cf)
1980                 return -ENOMEM;
1981
1982         /* Start by sync'ing the source and destination files */
1983         ret = file_write_and_wait_range(src_file, src_off, (src_off + len));
1984         if (ret < 0) {
1985                 dout("failed to write src file (%zd)\n", ret);
1986                 goto out;
1987         }
1988         ret = file_write_and_wait_range(dst_file, dst_off, (dst_off + len));
1989         if (ret < 0) {
1990                 dout("failed to write dst file (%zd)\n", ret);
1991                 goto out;
1992         }
1993
1994         /*
1995          * We need FILE_WR caps for dst_ci and FILE_RD for src_ci as other
1996          * clients may have dirty data in their caches.  And OSDs know nothing
1997          * about caps, so they can't safely do the remote object copies.
1998          */
1999         err = get_rd_wr_caps(src_file, &src_got,
2000                              dst_file, (dst_off + len), &dst_got);
2001         if (err < 0) {
2002                 dout("get_rd_wr_caps returned %d\n", err);
2003                 ret = -EOPNOTSUPP;
2004                 goto out;
2005         }
2006
2007         ret = is_file_size_ok(src_inode, dst_inode, src_off, dst_off, len);
2008         if (ret < 0)
2009                 goto out_caps;
2010
2011         size = i_size_read(dst_inode);
2012         endoff = dst_off + len;
2013
2014         /* Drop dst file cached pages */
2015         ret = invalidate_inode_pages2_range(dst_inode->i_mapping,
2016                                             dst_off >> PAGE_SHIFT,
2017                                             endoff >> PAGE_SHIFT);
2018         if (ret < 0) {
2019                 dout("Failed to invalidate inode pages (%zd)\n", ret);
2020                 ret = 0; /* XXX */
2021         }
2022         src_oloc.pool = src_ci->i_layout.pool_id;
2023         src_oloc.pool_ns = ceph_try_get_string(src_ci->i_layout.pool_ns);
2024         dst_oloc.pool = dst_ci->i_layout.pool_id;
2025         dst_oloc.pool_ns = ceph_try_get_string(dst_ci->i_layout.pool_ns);
2026
2027         ceph_calc_file_object_mapping(&src_ci->i_layout, src_off,
2028                                       src_ci->i_layout.object_size,
2029                                       &src_objnum, &src_objoff, &src_objlen);
2030         ceph_calc_file_object_mapping(&dst_ci->i_layout, dst_off,
2031                                       dst_ci->i_layout.object_size,
2032                                       &dst_objnum, &dst_objoff, &dst_objlen);
2033         /* object-level offsets need to be the same */
2034         if (src_objoff != dst_objoff) {
2035                 ret = -EOPNOTSUPP;
2036                 goto out_caps;
2037         }
2038
2039         /*
2040          * Do a manual copy if the object offset isn't object aligned.
2041          * 'src_objlen' contains the bytes left until the end of the object,
2042          * starting at src_off.
2043          */
2044         if (src_objoff) {
2045                 /*
2046                  * we need to temporarily drop all caps as we'll be calling
2047                  * {read,write}_iter, which will get caps again.
2048                  */
2049                 put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);
2050                 ret = do_splice_direct(src_file, &src_off, dst_file,
2051                                        &dst_off, src_objlen, flags);
2052                 if (ret < 0) {
2053                         dout("do_splice_direct returned %zd\n", ret);
2054                         goto out;
2055                 }
2056                 len -= ret;
2057                 err = get_rd_wr_caps(src_file, &src_got,
2058                                      dst_file, (dst_off + len), &dst_got);
2059                 if (err < 0)
2060                         goto out;
2061                 err = is_file_size_ok(src_inode, dst_inode,
2062                                       src_off, dst_off, len);
2063                 if (err < 0)
2064                         goto out_caps;
2065         }
2066         object_size = src_ci->i_layout.object_size;
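        /*
         * Main copy loop: each iteration clones one complete RADOS object
         * on the OSD side via copy-from, so the copied bytes never travel
         * through the client.
         */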
2067         while (len >= object_size) {
2068                 ceph_calc_file_object_mapping(&src_ci->i_layout, src_off,
2069                                               object_size, &src_objnum,
2070                                               &src_objoff, &src_objlen);
2071                 ceph_calc_file_object_mapping(&dst_ci->i_layout, dst_off,
2072                                               object_size, &dst_objnum,
2073                                               &dst_objoff, &dst_objlen);
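                /* data objects are named "<ino hex>.<objnum as 8 hex digits>" */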
2074                 ceph_oid_init(&src_oid);
2075                 ceph_oid_printf(&src_oid, "%llx.%08llx",
2076                                 src_ci->i_vino.ino, src_objnum);
2077                 ceph_oid_init(&dst_oid);
2078                 ceph_oid_printf(&dst_oid, "%llx.%08llx",
2079                                 dst_ci->i_vino.ino, dst_objnum);
2080                 /* Do an object remote copy */
2081                 err = ceph_osdc_copy_from(
2082                         &src_fsc->client->osdc,
2083                         src_ci->i_vino.snap, 0,
2084                         &src_oid, &src_oloc,
2085                         CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
2086                         CEPH_OSD_OP_FLAG_FADVISE_NOCACHE,
2087                         &dst_oid, &dst_oloc,
2088                         CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
2089                         CEPH_OSD_OP_FLAG_FADVISE_DONTNEED, 0);
2090                 if (err) {
2091                         dout("ceph_osdc_copy_from returned %d\n", err);
2092                         if (!ret)
2093                                 ret = err;
2094                         goto out_caps;
2095                 }
2096                 len -= object_size;
2097                 src_off += object_size;
2098                 dst_off += object_size;
2099                 ret += object_size;
2100         }
2101
2102         if (len)
2103                 /* We still need one final local copy */
2104                 do_final_copy = true;
2105
2106         file_update_time(dst_file);
2107         inode_inc_iversion_raw(dst_inode);
2108
2109         if (endoff > size) {
2110                 int caps_flags = 0;
2111
2112                 /* Let the MDS know about dst file size change */
2113                 if (ceph_quota_is_max_bytes_approaching(dst_inode, endoff))
2114                         caps_flags |= CHECK_CAPS_NODELAY;
2115                 if (ceph_inode_set_size(dst_inode, endoff))
2116                         caps_flags |= CHECK_CAPS_AUTHONLY;
2117                 if (caps_flags)
2118                         ceph_check_caps(dst_ci, caps_flags, NULL);
2119         }
2120         /* Mark Fw dirty */
2121         spin_lock(&dst_ci->i_ceph_lock);
2122         dst_ci->i_inline_version = CEPH_INLINE_NONE;
2123         dirty = __ceph_mark_dirty_caps(dst_ci, CEPH_CAP_FILE_WR, &prealloc_cf);
2124         spin_unlock(&dst_ci->i_ceph_lock);
2125         if (dirty)
2126                 __mark_inode_dirty(dst_inode, dirty);
2127
2128 out_caps:
2129         put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);
2130
2131         if (do_final_copy) {
2132                 err = do_splice_direct(src_file, &src_off, dst_file,
2133                                        &dst_off, len, flags);
2134                 if (err < 0) {
2135                         dout("do_splice_direct returned %d\n", err);
2136                         goto out;
2137                 }
2138                 len -= err;
2139                 ret += err;
2140         }
2141
2142 out:
2143         ceph_free_cap_flush(prealloc_cf);
2144
2145         return ret;
2146 }
2147
2148 static ssize_t ceph_copy_file_range(struct file *src_file, loff_t src_off,
2149                                     struct file *dst_file, loff_t dst_off,
2150                                     size_t len, unsigned int flags)
2151 {
2152         ssize_t ret;
2153
2154         ret = __ceph_copy_file_range(src_file, src_off, dst_file, dst_off,
2155                                      len, flags);
2156
2157         if (ret == -EOPNOTSUPP || ret == -EXDEV)
2158                 ret = generic_copy_file_range(src_file, src_off, dst_file,
2159                                               dst_off, len, flags);
2160         return ret;
2161 }
2162
2163 const struct file_operations ceph_file_fops = {
2164         .open = ceph_open,
2165         .release = ceph_release,
2166         .llseek = ceph_llseek,
2167         .read_iter = ceph_read_iter,
2168         .write_iter = ceph_write_iter,
2169         .mmap = ceph_mmap,
2170         .fsync = ceph_fsync,
2171         .lock = ceph_lock,
2172         .flock = ceph_flock,
2173         .splice_read = generic_file_splice_read,
2174         .splice_write = iter_file_splice_write,
2175         .unlocked_ioctl = ceph_ioctl,
2176         .compat_ioctl   = ceph_ioctl,
2177         .fallocate      = ceph_fallocate,
2178         .copy_file_range = ceph_copy_file_range,
2179 };