#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/falloc.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
/*
 * Ceph file operations
 *
 * Implement basic open/close functionality, and implement
 * read/write.
 *
 * We implement three modes of file I/O:
 *  - buffered uses the generic_file_aio_{read,write} helpers
 *
 *  - synchronous is used when there is multi-client read/write
 *    sharing, avoids the page cache, and synchronously waits for an
 *    ack from the MDS.
 *
 *  - direct io takes the variant of the sync path that references
 *    user pages directly.
 *
 * fsync() flushes and waits on dirty pages, but just queues metadata
 * for writeback: since the MDS can recover size and mtime there is no
 * need to wait for MDS acknowledgement.
 */
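/*
 * Which of the three paths a given request takes depends on the
 * capabilities ("caps") the MDS has granted.  For illustration: a lone
 * client holding Fc (FILE_CACHE) caps reads through the page cache;
 * once a second client opens the same file for write, the MDS revokes
 * Fc and reads fall back to the synchronous path described above.
 */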
/*
 * Calculate the length sum of direct io vectors that can
 * be combined into one page vector.
 */
static size_t dio_get_pagev_size(const struct iov_iter *it)
{
        const struct iovec *iov = it->iov;
        const struct iovec *iovend = iov + it->nr_segs;
        size_t size;

        size = iov->iov_len - it->iov_offset;
        /*
         * An iov can be page vectored when both the current tail
         * and the next base are page aligned.
         */
        while (PAGE_ALIGNED((iov->iov_base + iov->iov_len)) &&
               (++iov < iovend && PAGE_ALIGNED((iov->iov_base)))) {
                size += iov->iov_len;
        }
        dout("dio_get_pagevlen len = %zu\n", size);
        return size;
}
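/*
 * Worked example (illustrative, assuming 4K pages): for iovecs
 * { base 0x1000, len 0x3000 } and { base 0x8000, len 0x2000 } with
 * iov_offset 0, the first tail (0x4000) and the second base are both
 * page aligned, so the two combine and this returns 0x5000.  A third
 * iovec at base 0x8500 would stop the scan: its base is not aligned.
 */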
/*
 * Allocate a page vector based on (@it, @nbytes).
 * The return value is the tuple describing a page vector,
 * that is (@pages, @page_align, @num_pages).
 */
static struct page **
dio_get_pages_alloc(const struct iov_iter *it, size_t nbytes,
                    size_t *page_align, int *num_pages)
{
        struct iov_iter tmp_it = *it;
        size_t align;
        struct page **pages;
        int ret = 0, idx, npages;

        align = (unsigned long)(it->iov->iov_base + it->iov_offset) &
                (PAGE_SIZE - 1);
        npages = calc_pages_for(align, nbytes);
        pages = kmalloc(sizeof(*pages) * npages, GFP_KERNEL);
        if (!pages) {
                pages = vmalloc(sizeof(*pages) * npages);
                if (!pages)
                        return ERR_PTR(-ENOMEM);
        }

        for (idx = 0; idx < npages; ) {
                size_t start;
                ret = iov_iter_get_pages(&tmp_it, pages + idx, nbytes,
                                         npages - idx, &start);
                if (ret < 0)
                        goto fail;
                iov_iter_advance(&tmp_it, ret);
                nbytes -= ret;
                idx += (ret + start + PAGE_SIZE - 1) / PAGE_SIZE;
        }

        BUG_ON(nbytes != 0);
        *num_pages = npages;
        *page_align = align;
        dout("dio_get_pages_alloc: got %d pages align %zu\n", npages, align);
        return pages;
fail:
        ceph_put_page_vector(pages, idx, false);
        return ERR_PTR(ret);
}
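/*
 * Illustrative example (assuming 4K pages): for a user buffer ending
 * in 0x200 and nbytes 0x2000, @page_align is 0x200 and
 * calc_pages_for(0x200, 0x2000) yields three pages, since the 8K span
 * straddles three 4K pages.
 */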
/*
 * Prepare an open request.  Preallocate ceph_cap to avoid an
 * inopportune ENOMEM later.
 */
static struct ceph_mds_request *
prepare_open_request(struct super_block *sb, int flags, int create_mode)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int want_auth = USE_ANY_MDS;
        int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;

        if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
                want_auth = USE_AUTH_MDS;

        req = ceph_mdsc_create_request(mdsc, op, want_auth);
        if (IS_ERR(req))
                goto out;
        req->r_fmode = ceph_flags_to_mode(flags);
        req->r_args.open.flags = cpu_to_le32(flags);
        req->r_args.open.mode = cpu_to_le32(create_mode);
out:
        return req;
}
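/*
 * For illustration: open(path, O_RDWR|O_CREAT) maps to a
 * CEPH_MDS_OP_CREATE request routed to the auth MDS (any flag in
 * O_WRONLY|O_RDWR|O_CREAT|O_TRUNC forces USE_AUTH_MDS), while a plain
 * O_RDONLY open becomes CEPH_MDS_OP_OPEN and may go to any MDS.
 */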
/*
 * initialize private struct file data.
 * if we fail, clean up by dropping fmode reference on the ceph_inode
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
        struct ceph_file_info *cf;
        int ret = 0;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;

        switch (inode->i_mode & S_IFMT) {
        case S_IFREG:
                /* First file open request creates the cookie, we want to keep
                 * this cookie around for the lifetime of the inode so as not
                 * to have to worry about fscache register / revoke / operation
                 * races.
                 *
                 * Also, if we know the operation is going to invalidate data
                 * (non readonly) just nuke the cache right away.
                 */
                ceph_fscache_register_inode_cookie(mdsc->fsc, ci);
                if ((fmode & CEPH_FILE_MODE_WR))
                        ceph_fscache_invalidate(inode);
        case S_IFDIR:
                dout("init_file %p %p 0%o (regular)\n", inode, file,
                     inode->i_mode);
                cf = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL);
                if (cf == NULL) {
                        ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
                        return -ENOMEM;
                }
                cf->fmode = fmode;
                cf->next_offset = 2;
                cf->readdir_cache_idx = -1;
                file->private_data = cf;
                BUG_ON(inode->i_fop->release != ceph_release);
                break;

        case S_IFLNK:
                dout("init_file %p %p 0%o (symlink)\n", inode, file,
                     inode->i_mode);
                ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
                break;

        default:
                dout("init_file %p %p 0%o (special)\n", inode, file,
                     inode->i_mode);
                /*
                 * we need to drop the open ref now, since we don't
                 * have .release set to ceph_release.
                 */
                ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
                BUG_ON(inode->i_fop->release == ceph_release);

                /* call the proper open fop */
                ret = inode->i_fop->open(inode, file);
        }
        return ret;
}
/*
 * try renew caps after session gets killed.
 */
int ceph_renew_caps(struct inode *inode)
{
        struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_mds_request *req;
        int err, flags, wanted;

        spin_lock(&ci->i_ceph_lock);
        wanted = __ceph_caps_file_wanted(ci);
        if (__ceph_is_any_real_caps(ci) &&
            (!(wanted & CEPH_CAP_ANY_WR) || ci->i_auth_cap)) {
                int issued = __ceph_caps_issued(ci, NULL);
                spin_unlock(&ci->i_ceph_lock);
                dout("renew caps %p want %s issued %s updating mds_wanted\n",
                     inode, ceph_cap_string(wanted), ceph_cap_string(issued));
                ceph_check_caps(ci, 0, NULL);
                return 0;
        }
        spin_unlock(&ci->i_ceph_lock);

        flags = 0;
        if ((wanted & CEPH_CAP_FILE_RD) && (wanted & CEPH_CAP_FILE_WR))
                flags = O_RDWR;
        else if (wanted & CEPH_CAP_FILE_RD)
                flags = O_RDONLY;
        else if (wanted & CEPH_CAP_FILE_WR)
                flags = O_WRONLY;
#ifdef O_LAZY
        if (wanted & CEPH_CAP_FILE_LAZYIO)
                flags |= O_LAZY;
#endif

        req = prepare_open_request(inode->i_sb, flags, 0);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out;
        }

        req->r_inode = inode;
        ihold(inode);
        req->r_num_caps = 1;
        req->r_fmode = -1;

        err = ceph_mdsc_do_request(mdsc, NULL, req);
        ceph_mdsc_put_request(req);
out:
        dout("renew caps %p open result=%d\n", inode, err);
        return err < 0 ? err : 0;
}
/*
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS).  We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        struct ceph_file_info *cf = file->private_data;
        int err;
        int flags, fmode, wanted;

        if (cf) {
                dout("open file %p is already opened\n", file);
                return 0;
        }

        /* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
        flags = file->f_flags & ~(O_CREAT|O_EXCL);
        if (S_ISDIR(inode->i_mode))
                flags = O_DIRECTORY;  /* mds likes to know */

        dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
             ceph_vinop(inode), file, flags, file->f_flags);
        fmode = ceph_flags_to_mode(flags);
        wanted = ceph_caps_for_mode(fmode);

        /* snapped files are read-only */
        if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
                return -EROFS;

        /* trivially open snapdir */
        if (ceph_snap(inode) == CEPH_SNAPDIR) {
                spin_lock(&ci->i_ceph_lock);
                __ceph_get_fmode(ci, fmode);
                spin_unlock(&ci->i_ceph_lock);
                return ceph_init_file(inode, file, fmode);
        }

        /*
         * No need to block if we have caps on the auth MDS (for
         * write) or any MDS (for read).  Update wanted set
         * asynchronously.
         */
        spin_lock(&ci->i_ceph_lock);
        if (__ceph_is_any_real_caps(ci) &&
            (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
                int mds_wanted = __ceph_caps_mds_wanted(ci);
                int issued = __ceph_caps_issued(ci, NULL);

                dout("open %p fmode %d want %s issued %s using existing\n",
                     inode, fmode, ceph_cap_string(wanted),
                     ceph_cap_string(issued));
                __ceph_get_fmode(ci, fmode);
                spin_unlock(&ci->i_ceph_lock);

                /* adjust wanted? */
                if ((issued & wanted) != wanted &&
                    (mds_wanted & wanted) != wanted &&
                    ceph_snap(inode) != CEPH_SNAPDIR)
                        ceph_check_caps(ci, 0, NULL);

                return ceph_init_file(inode, file, fmode);
        } else if (ceph_snap(inode) != CEPH_NOSNAP &&
                   (ci->i_snap_caps & wanted) == wanted) {
                __ceph_get_fmode(ci, fmode);
                spin_unlock(&ci->i_ceph_lock);
                return ceph_init_file(inode, file, fmode);
        }

        spin_unlock(&ci->i_ceph_lock);

        dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
        req = prepare_open_request(inode->i_sb, flags, 0);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out;
        }
        req->r_inode = inode;
        ihold(inode);

        req->r_num_caps = 1;
        err = ceph_mdsc_do_request(mdsc, NULL, req);
        if (!err)
                err = ceph_init_file(inode, file, req->r_fmode);
        ceph_mdsc_put_request(req);
        dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
        return err;
}
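/*
 * Example of the fast path above (illustrative): a client re-opening
 * a file for read while it still holds caps takes the "using existing"
 * branch and never blocks on the MDS; only the wanted-caps delta is
 * pushed asynchronously via ceph_check_caps().
 */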
/*
 * Do a lookup + open with a single request.  If we get a non-existent
 * file or symlink, return 1 so the VFS can retry.
 */
int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
                     struct file *file, unsigned flags, umode_t mode,
                     int *opened)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        struct dentry *dn;
        struct ceph_acls_info acls = {};
        int mask;
        int err;

        dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
             dir, dentry, dentry,
             d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);

        if (dentry->d_name.len > NAME_MAX)
                return -ENAMETOOLONG;

        err = ceph_init_dentry(dentry);
        if (err < 0)
                return err;

        if (flags & O_CREAT) {
                err = ceph_pre_init_acls(dir, &mode, &acls);
                if (err < 0)
                        return err;
        }

        /* do the open */
        req = prepare_open_request(dir->i_sb, flags, mode);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out_acl;
        }
        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        if (flags & O_CREAT) {
                req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
                req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
                if (acls.pagelist) {
                        req->r_pagelist = acls.pagelist;
                        acls.pagelist = NULL;
                }
        }

        mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
        if (ceph_security_xattr_wanted(dir))
                mask |= CEPH_CAP_XATTR_SHARED;
        req->r_args.open.mask = cpu_to_le32(mask);

        req->r_locked_dir = dir;           /* caller holds dir->i_mutex */
        err = ceph_mdsc_do_request(mdsc,
                                   (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
                                   req);
        err = ceph_handle_snapdir(req, dentry, err);
        if (err)
                goto out_req;

        if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
                err = ceph_handle_notrace_create(dir, dentry);

        if (d_unhashed(dentry)) {
                dn = ceph_finish_lookup(req, dentry, err);
                if (IS_ERR(dn))
                        err = PTR_ERR(dn);
        } else {
                /* we were given a hashed negative dentry */
                dn = NULL;
        }
        if (err)
                goto out_req;
        if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
                /* make vfs retry on splice, ENOENT, or symlink */
                dout("atomic_open finish_no_open on dn %p\n", dn);
                err = finish_no_open(file, dn);
        } else {
                dout("atomic_open finish_open on dn %p\n", dn);
                if (req->r_op == CEPH_MDS_OP_CREATE &&
                    req->r_reply_info.has_create_ino) {
                        ceph_init_inode_acls(d_inode(dentry), &acls);
                        *opened |= FILE_CREATED;
                }
                err = finish_open(file, dentry, ceph_open, opened);
        }
out_req:
        if (!req->r_err && req->r_target_inode)
                ceph_put_fmode(ceph_inode(req->r_target_inode), req->r_fmode);
        ceph_mdsc_put_request(req);
out_acl:
        ceph_release_acls_info(&acls);
        dout("atomic_open result=%d\n", err);
        return err;
}
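/*
 * Illustrative flow: openat(dir, "new", O_CREAT|O_WRONLY) sends one
 * CEPH_MDS_OP_CREATE that performs lookup + create + open in a single
 * MDS round trip; if the reply traced a symlink or the dentry was
 * spliced, finish_no_open() returns 1 and the VFS falls back to the
 * slow lookup-then-open sequence.
 */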
int ceph_release(struct inode *inode, struct file *file)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_file_info *cf = file->private_data;

        dout("release inode %p file %p\n", inode, file);
        ceph_put_fmode(ci, cf->fmode);
        if (cf->last_readdir)
                ceph_mdsc_put_request(cf->last_readdir);
        kfree(cf->last_name);
        kmem_cache_free(ceph_file_cachep, cf);

        /* wake up anyone waiting for caps on this inode */
        wake_up_all(&ci->i_cap_wq);
        return 0;
}
/*
 * Read a range of bytes striped over one or more objects.  Iterate over
 * objects we stripe over.  (That's not atomic, but good enough for now.)
 *
 * If we get a short result from the OSD, check against i_size; we need to
 * only return a short read to the caller if we hit EOF.
 */
static int striped_read(struct inode *inode,
                        u64 off, u64 len,
                        struct page **pages, int num_pages,
                        int *checkeof)
{
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_inode_info *ci = ceph_inode(inode);
        u64 pos, this_len, left;
        loff_t i_size;
        int page_align, pages_left;
        int read, ret;
        struct page **page_pos;
        bool hit_stripe, was_short;

        /*
         * we may need to do multiple reads.  not atomic, unfortunately.
         */
        pos = off;
        left = len;
        page_pos = pages;
        pages_left = num_pages;
        read = 0;

more:
        this_len = left;
        page_align = pos & ~PAGE_MASK;

        ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode),
                                  &ci->i_layout, pos, &this_len,
                                  ci->i_truncate_seq,
                                  ci->i_truncate_size,
                                  page_pos, pages_left, page_align);
        if (ret == -ENOENT)
                ret = 0;
        hit_stripe = this_len < left;
        was_short = ret >= 0 && ret < this_len;
        dout("striped_read %llu~%llu (read %u) got %d%s%s\n", pos, left, read,
             ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");

        i_size = i_size_read(inode);
        if (ret >= 0) {
                int didpages;
                if (was_short && (pos + ret < i_size)) {
                        int zlen = min(this_len - ret, i_size - pos - ret);
                        int zoff = (off & ~PAGE_MASK) + read + ret;
                        dout(" zero gap %llu to %llu\n",
                             pos + ret, pos + ret + zlen);
                        ceph_zero_page_vector_range(zoff, zlen, pages);
                        ret += zlen;
                }

                didpages = (page_align + ret) >> PAGE_SHIFT;
                pos += ret;
                read = pos - off;
                left -= ret;
                page_pos += didpages;
                pages_left -= didpages;

                /* hit stripe and need to continue */
                if (left && hit_stripe && pos < i_size)
                        goto more;
        }

        if (read > 0) {
                ret = read;
                /* did we bounce off eof? */
                if (pos + left > i_size)
                        *checkeof = CHECK_EOF;
        }

        dout("striped_read returns %d\n", ret);
        return ret;
}
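/*
 * Striping example (illustrative, default-style layout with 4M
 * objects and stripe_count 1): a 6M read at offset 0 first gets at
 * most 4M from object 0 (hit_stripe), loops via 'more', then reads the
 * remaining 2M from object 1.  A short return inside i_size is zero
 * filled rather than reported short to the caller.
 */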
/*
 * Completely synchronous read and write methods.  Direct from __user
 * buffer to osd, or directly to user pages (if O_DIRECT).
 *
 * If the read spans object boundary, just do multiple reads.
 */
static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *i,
                              int *checkeof)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
        struct page **pages;
        u64 off = iocb->ki_pos;
        int num_pages, ret;
        size_t len = iov_iter_count(i);

        dout("sync_read on file %p %llu~%u %s\n", file, off,
             (unsigned)len,
             (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

        if (!len)
                return 0;
        /*
         * flush any page cache pages in this range.  this
         * will make concurrent normal and sync io slow,
         * but it will at least behave sensibly when they are
         * in sequence.
         */
        ret = filemap_write_and_wait_range(inode->i_mapping, off,
                                           off + len);
        if (ret < 0)
                return ret;

        num_pages = calc_pages_for(off, len);
        pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
        if (IS_ERR(pages))
                return PTR_ERR(pages);
        ret = striped_read(inode, off, len, pages,
                           num_pages, checkeof);
        if (ret > 0) {
                int l, k = 0;
                size_t left = ret;

                while (left) {
                        size_t page_off = off & ~PAGE_MASK;
                        size_t copy = min_t(size_t, left,
                                            PAGE_SIZE - page_off);
                        l = copy_page_to_iter(pages[k++], page_off, copy, i);
                        off += l;
                        left -= l;
                        if (l < copy)
                                break;
                }
        }
        ceph_release_page_vector(pages, num_pages);

        if (off > iocb->ki_pos) {
                ret = off - iocb->ki_pos;
                iocb->ki_pos = off;
        }

        dout("sync_read result %d\n", ret);
        return ret;
}
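/*
 * Note (illustrative): the page vector above acts as a kernel bounce
 * buffer; data lands in pages from ceph_alloc_page_vector() and is
 * then copied into the caller's iov_iter with copy_page_to_iter(),
 * unlike the O_DIRECT path, which pins the user pages themselves.
 */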
struct ceph_aio_request {
        struct kiocb *iocb;
        size_t total_len;
        int write;
        int error;
        struct list_head osd_reqs;
        unsigned num_reqs;
        atomic_t pending_reqs;
        struct timespec mtime;
        struct ceph_cap_flush *prealloc_cf;
};

struct ceph_aio_work {
        struct work_struct work;
        struct ceph_osd_request *req;
};

static void ceph_aio_retry_work(struct work_struct *work);
static void ceph_aio_complete(struct inode *inode,
                              struct ceph_aio_request *aio_req)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        int ret;

        if (!atomic_dec_and_test(&aio_req->pending_reqs))
                return;

        ret = aio_req->error;
        if (!ret)
                ret = aio_req->total_len;

        dout("ceph_aio_complete %p rc %d\n", inode, ret);

        if (ret >= 0 && aio_req->write) {
                int dirty;

                loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len;
                if (endoff > i_size_read(inode)) {
                        if (ceph_inode_set_size(inode, endoff))
                                ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
                }

                spin_lock(&ci->i_ceph_lock);
                ci->i_inline_version = CEPH_INLINE_NONE;
                dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
                                               &aio_req->prealloc_cf);
                spin_unlock(&ci->i_ceph_lock);
                if (dirty)
                        __mark_inode_dirty(inode, dirty);
        }

        ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR :
                                                CEPH_CAP_FILE_RD));

        aio_req->iocb->ki_complete(aio_req->iocb, ret, 0);

        ceph_free_cap_flush(aio_req->prealloc_cf);
        kfree(aio_req);
}
static void ceph_aio_complete_req(struct ceph_osd_request *req)
{
        int rc = req->r_result;
        struct inode *inode = req->r_inode;
        struct ceph_aio_request *aio_req = req->r_priv;
        struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
        int num_pages = calc_pages_for((u64)osd_data->alignment,
                                       osd_data->length);

        dout("ceph_aio_complete_req %p rc %d bytes %llu\n",
             inode, rc, osd_data->length);

        if (rc == -EOLDSNAPC) {
                struct ceph_aio_work *aio_work;
                BUG_ON(!aio_req->write);

                aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS);
                if (aio_work) {
                        INIT_WORK(&aio_work->work, ceph_aio_retry_work);
                        aio_work->req = req;
                        queue_work(ceph_inode_to_client(inode)->wb_wq,
                                   &aio_work->work);
                        return;
                }
                rc = -ENOMEM;
        } else if (!aio_req->write) {
                if (rc == -ENOENT)
                        rc = 0;
                if (rc >= 0 && osd_data->length > rc) {
                        int zoff = osd_data->alignment + rc;
                        int zlen = osd_data->length - rc;
                        /*
                         * If read is satisfied by single OSD request,
                         * it can pass EOF. Otherwise read is within
                         * i_size.
                         */
                        if (aio_req->num_reqs == 1) {
                                loff_t i_size = i_size_read(inode);
                                loff_t endoff = aio_req->iocb->ki_pos + rc;
                                if (endoff < i_size)
                                        zlen = min_t(size_t, zlen,
                                                     i_size - endoff);
                                aio_req->total_len = rc + zlen;
                        }

                        if (zlen > 0)
                                ceph_zero_page_vector_range(zoff, zlen,
                                                            osd_data->pages);
                }
        }

        ceph_put_page_vector(osd_data->pages, num_pages, false);
        ceph_osdc_put_request(req);

        if (rc < 0)
                cmpxchg(&aio_req->error, 0, rc);

        ceph_aio_complete(inode, aio_req);
        return;
}
static void ceph_aio_retry_work(struct work_struct *work)
{
        struct ceph_aio_work *aio_work =
                container_of(work, struct ceph_aio_work, work);
        struct ceph_osd_request *orig_req = aio_work->req;
        struct ceph_aio_request *aio_req = orig_req->r_priv;
        struct inode *inode = orig_req->r_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_snap_context *snapc;
        struct ceph_osd_request *req;
        int ret;

        spin_lock(&ci->i_ceph_lock);
        if (__ceph_have_pending_cap_snap(ci)) {
                struct ceph_cap_snap *capsnap =
                        list_last_entry(&ci->i_cap_snaps,
                                        struct ceph_cap_snap,
                                        ci_item);
                snapc = ceph_get_snap_context(capsnap->context);
        } else {
                BUG_ON(!ci->i_head_snapc);
                snapc = ceph_get_snap_context(ci->i_head_snapc);
        }
        spin_unlock(&ci->i_ceph_lock);

        req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 2,
                                      false, GFP_NOFS);
        if (!req) {
                ret = -ENOMEM;
                req = orig_req;
                goto out;
        }

        req->r_flags = CEPH_OSD_FLAG_ORDERSNAP |
                       CEPH_OSD_FLAG_ONDISK |
                       CEPH_OSD_FLAG_WRITE;
        ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc);
        ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid);

        ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
        if (ret) {
                ceph_osdc_put_request(req);
                req = orig_req;
                goto out;
        }

        req->r_ops[0] = orig_req->r_ops[0];
        osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0);

        req->r_mtime = aio_req->mtime;
        req->r_data_offset = req->r_ops[0].extent.offset;

        ceph_osdc_put_request(orig_req);

        req->r_callback = ceph_aio_complete_req;
        req->r_inode = inode;
        req->r_priv = aio_req;

        ret = ceph_osdc_start_request(req->r_osdc, req, false);
out:
        if (ret < 0) {
                req->r_result = ret;
                ceph_aio_complete_req(req);
        }

        ceph_put_snap_context(snapc);
        kfree(aio_work);
}
/*
 * Write commit request unsafe callback, called to tell us when a
 * request is unsafe (that is, in flight--has been handed to the
 * messenger to send to its target osd).  It is called again when
 * we've received a response message indicating the request is
 * "safe" (its CEPH_OSD_FLAG_ONDISK flag is set), or when a request
 * is completed early (and unsuccessfully) due to a timeout or
 * interrupt.
 *
 * This is used if we requested both an ACK and ONDISK commit reply
 * from the OSD.
 */
static void ceph_sync_write_unsafe(struct ceph_osd_request *req, bool unsafe)
{
        struct ceph_inode_info *ci = ceph_inode(req->r_inode);

        dout("%s %p tid %llu %ssafe\n", __func__, req, req->r_tid,
             unsafe ? "un" : "");
        if (unsafe) {
                ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR);
                spin_lock(&ci->i_unsafe_lock);
                list_add_tail(&req->r_unsafe_item,
                              &ci->i_unsafe_writes);
                spin_unlock(&ci->i_unsafe_lock);

                complete_all(&req->r_completion);
        } else {
                spin_lock(&ci->i_unsafe_lock);
                list_del_init(&req->r_unsafe_item);
                spin_unlock(&ci->i_unsafe_lock);
                ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR);
        }
}
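/*
 * Timeline example (illustrative): write(2) -> request handed to the
 * messenger -> unsafe==true (take a Fw cap ref, track the request on
 * i_unsafe_writes, complete_all() lets the sync waiter continue) ->
 * OSD replies with ONDISK -> unsafe==false (untrack, drop the Fw ref).
 * A later fsync() can thus wait for i_unsafe_writes to drain.
 */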
static ssize_t
ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
                       struct ceph_snap_context *snapc,
                       struct ceph_cap_flush **pcf)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_vino vino;
        struct ceph_osd_request *req;
        struct page **pages;
        struct ceph_aio_request *aio_req = NULL;
        int num_pages = 0;
        int flags;
        int ret;
        struct timespec mtime = current_fs_time(inode->i_sb);
        size_t count = iov_iter_count(iter);
        loff_t pos = iocb->ki_pos;
        bool write = iov_iter_rw(iter) == WRITE;

        if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
                return -EROFS;

        dout("sync_direct_read_write (%s) on file %p %lld~%u\n",
             (write ? "write" : "read"), file, pos, (unsigned)count);

        ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
        if (ret < 0)
                return ret;

        if (write) {
                ret = invalidate_inode_pages2_range(inode->i_mapping,
                                        pos >> PAGE_SHIFT,
                                        (pos + count) >> PAGE_SHIFT);
                if (ret < 0)
                        dout("invalidate_inode_pages2_range returned %d\n", ret);

                flags = CEPH_OSD_FLAG_ORDERSNAP |
                        CEPH_OSD_FLAG_ONDISK |
                        CEPH_OSD_FLAG_WRITE;
        } else {
                flags = CEPH_OSD_FLAG_READ;
        }

        while (iov_iter_count(iter) > 0) {
                u64 size = dio_get_pagev_size(iter);
                size_t start;
                ssize_t len;

                vino = ceph_vino(inode);
                req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
                                            vino, pos, &size, 0,
                                            /*include a 'startsync' command*/
                                            write ? 2 : 1,
                                            write ? CEPH_OSD_OP_WRITE :
                                                    CEPH_OSD_OP_READ,
                                            flags, snapc,
                                            ci->i_truncate_seq,
                                            ci->i_truncate_size,
                                            false);
                if (IS_ERR(req)) {
                        ret = PTR_ERR(req);
                        break;
                }

                len = size;
                pages = dio_get_pages_alloc(iter, len, &start, &num_pages);
                if (IS_ERR(pages)) {
                        ceph_osdc_put_request(req);
                        ret = PTR_ERR(pages);
                        break;
                }

                /*
                 * To simplify error handling, allow AIO when IO within i_size
                 * or IO can be satisfied by single OSD request.
                 */
                if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) &&
                    (len == count || pos + count <= i_size_read(inode))) {
                        aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL);
                        if (aio_req) {
                                aio_req->iocb = iocb;
                                aio_req->write = write;
                                INIT_LIST_HEAD(&aio_req->osd_reqs);
                                if (write) {
                                        aio_req->mtime = mtime;
                                        swap(aio_req->prealloc_cf, *pcf);
                                }
                        }
                        /* ignore error */
                }

                if (write) {
                        /*
                         * throw out any page cache pages in this range. this
                         * may block.
                         */
                        truncate_inode_pages_range(inode->i_mapping, pos,
                                        (pos+len) | (PAGE_SIZE - 1));

                        osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0);
                        req->r_mtime = mtime;
                }

                osd_req_op_extent_osd_data_pages(req, 0, pages, len, start,
                                                 false, false);

                if (aio_req) {
                        aio_req->total_len += len;
                        aio_req->num_reqs++;
                        atomic_inc(&aio_req->pending_reqs);

                        req->r_callback = ceph_aio_complete_req;
                        req->r_inode = inode;
                        req->r_priv = aio_req;
                        list_add_tail(&req->r_unsafe_item, &aio_req->osd_reqs);

                        pos += len;
                        iov_iter_advance(iter, len);
                        continue;
                }

                ret = ceph_osdc_start_request(req->r_osdc, req, false);
                if (!ret)
                        ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

                size = i_size_read(inode);
                if (!write) {
                        if (ret == -ENOENT)
                                ret = 0;
                        if (ret >= 0 && ret < len && pos + ret < size) {
                                int zlen = min_t(size_t, len - ret,
                                                 size - pos - ret);
                                ceph_zero_page_vector_range(start + ret, zlen,
                                                            pages);
                                ret += zlen;
                        }
                        if (ret >= 0)
                                len = ret;
                }

                ceph_put_page_vector(pages, num_pages, false);

                ceph_osdc_put_request(req);
                if (ret < 0)
                        break;

                pos += len;
                iov_iter_advance(iter, len);

                if (!write && pos >= size)
                        break;

                if (write && pos > size) {
                        if (ceph_inode_set_size(inode, pos))
                                ceph_check_caps(ceph_inode(inode),
                                                CHECK_CAPS_AUTHONLY,
                                                NULL);
                }
        }

        if (aio_req) {
                if (aio_req->num_reqs == 0) {
                        kfree(aio_req);
                        return ret;
                }

                ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR :
                                              CEPH_CAP_FILE_RD);

                while (!list_empty(&aio_req->osd_reqs)) {
                        req = list_first_entry(&aio_req->osd_reqs,
                                               struct ceph_osd_request,
                                               r_unsafe_item);
                        list_del_init(&req->r_unsafe_item);
                        if (ret >= 0)
                                ret = ceph_osdc_start_request(req->r_osdc,
                                                              req, false);
                        if (ret < 0) {
                                req->r_result = ret;
                                ceph_aio_complete_req(req);
                        }
                }
                return -EIOCBQUEUED;
        }

        if (ret != -EOLDSNAPC && pos > iocb->ki_pos) {
                ret = pos - iocb->ki_pos;
                iocb->ki_pos = pos;
        }

        return ret;
}
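/*
 * Illustrative example of the AIO cutoff above: an async O_DIRECT
 * write of 8M spanning two 4M objects within i_size queues two OSD
 * requests, bumps pending_reqs to 2, and returns -EIOCBQUEUED; the
 * last ceph_aio_complete_req() delivers the result via ki_complete().
 */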
/*
 * Synchronous write, straight from __user pointer or user pages.
 *
 * If write spans object boundary, just do multiple writes.  (For a
 * correct atomic write, we should e.g. take write locks on all
 * objects, rollback on failure, etc.)
 */
static ssize_t
ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
                struct ceph_snap_context *snapc)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_vino vino;
        struct ceph_osd_request *req;
        struct page **pages;
        u64 len;
        int num_pages;
        int written = 0;
        int flags;
        int check_caps = 0;
        int ret;
        struct timespec mtime = current_fs_time(inode->i_sb);
        size_t count = iov_iter_count(from);

        if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
                return -EROFS;

        dout("sync_write on file %p %lld~%u\n", file, pos, (unsigned)count);

        ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
        if (ret < 0)
                return ret;

        ret = invalidate_inode_pages2_range(inode->i_mapping,
                                            pos >> PAGE_SHIFT,
                                            (pos + count) >> PAGE_SHIFT);
        if (ret < 0)
                dout("invalidate_inode_pages2_range returned %d\n", ret);

        flags = CEPH_OSD_FLAG_ORDERSNAP |
                CEPH_OSD_FLAG_ONDISK |
                CEPH_OSD_FLAG_WRITE |
                CEPH_OSD_FLAG_ACK;

        while ((len = iov_iter_count(from)) > 0) {
                size_t left;
                int n;

                vino = ceph_vino(inode);
                req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
                                            vino, pos, &len, 0, 1,
                                            CEPH_OSD_OP_WRITE, flags, snapc,
                                            ci->i_truncate_seq,
                                            ci->i_truncate_size,
                                            false);
                if (IS_ERR(req)) {
                        ret = PTR_ERR(req);
                        break;
                }

                /*
                 * write from beginning of first page,
                 * regardless of io alignment
                 */
                num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;

                pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
                if (IS_ERR(pages)) {
                        ret = PTR_ERR(pages);
                        goto out;
                }

                left = len;
                for (n = 0; n < num_pages; n++) {
                        size_t plen = min_t(size_t, left, PAGE_SIZE);
                        ret = copy_page_from_iter(pages[n], 0, plen, from);
                        if (ret != plen) {
                                ret = -EFAULT;
                                break;
                        }
                        left -= ret;
                }

                if (ret < 0) {
                        ceph_release_page_vector(pages, num_pages);
                        goto out;
                }

                /* get a second commit callback */
                req->r_unsafe_callback = ceph_sync_write_unsafe;
                req->r_inode = inode;

                osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
                                                 false, true);

                req->r_mtime = mtime;
                ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
                if (!ret)
                        ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

out:
                ceph_osdc_put_request(req);
                if (ret == 0) {
                        pos += len;
                        written += len;

                        if (pos > i_size_read(inode)) {
                                check_caps = ceph_inode_set_size(inode, pos);
                                if (check_caps)
                                        ceph_check_caps(ceph_inode(inode),
                                                        CHECK_CAPS_AUTHONLY,
                                                        NULL);
                        }
                } else
                        break;
        }

        if (ret != -EOLDSNAPC && written > 0) {
                ret = written;
                iocb->ki_pos = pos;
        }
        return ret;
}
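/*
 * Design note (illustrative): because each object write above is a
 * separate OSD request, a write spanning objects is not atomic; a
 * crash midway can leave trailing objects unwritten, which is why the
 * header comment talks about write locks and rollback for a "correct"
 * atomic write.
 */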
/*
 * Wrap generic_file_aio_read with checks for cap bits on the inode.
 * Atomically grab references, so that those bits are not released
 * back to the MDS mid-read.
 *
 * Hmm, the sync read case isn't actually async... should it be?
 */
static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        struct file *filp = iocb->ki_filp;
        struct ceph_file_info *fi = filp->private_data;
        size_t len = iov_iter_count(to);
        struct inode *inode = file_inode(filp);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct page *pinned_page = NULL;
        ssize_t ret;
        int want, got = 0;
        int retry_op = 0, read = 0;

again:
        dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
             inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);

        if (fi->fmode & CEPH_FILE_MODE_LAZY)
                want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
        else
                want = CEPH_CAP_FILE_CACHE;
        ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, -1, &got, &pinned_page);
        if (ret < 0)
                return ret;

        if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
            (iocb->ki_flags & IOCB_DIRECT) ||
            (fi->flags & CEPH_F_SYNC)) {

                dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
                     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
                     ceph_cap_string(got));

                if (ci->i_inline_version == CEPH_INLINE_NONE) {
                        if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
                                ret = ceph_direct_read_write(iocb, to,
                                                             NULL, NULL);
                                if (ret >= 0 && ret < len)
                                        retry_op = CHECK_EOF;
                        } else {
                                ret = ceph_sync_read(iocb, to, &retry_op);
                        }
                } else {
                        retry_op = READ_INLINE;
                }
        } else {
                dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
                     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
                     ceph_cap_string(got));

                ret = generic_file_read_iter(iocb, to);
        }
        dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
             inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
        if (pinned_page) {
                put_page(pinned_page);
                pinned_page = NULL;
        }
        ceph_put_cap_refs(ci, got);
        if (retry_op > HAVE_RETRIED && ret >= 0) {
                int statret;
                struct page *page = NULL;
                loff_t i_size;
                if (retry_op == READ_INLINE) {
                        page = __page_cache_alloc(GFP_KERNEL);
                        if (!page)
                                return -ENOMEM;
                }

                statret = __ceph_do_getattr(inode, page,
                                            CEPH_STAT_CAP_INLINE_DATA, !!page);
                if (statret < 0) {
                        if (page)
                                __free_page(page);
                        if (statret == -ENODATA) {
                                BUG_ON(retry_op != READ_INLINE);
                                goto again;
                        }
                        return statret;
                }

                i_size = i_size_read(inode);
                if (retry_op == READ_INLINE) {
                        BUG_ON(ret > 0 || read > 0);
                        if (iocb->ki_pos < i_size &&
                            iocb->ki_pos < PAGE_SIZE) {
                                loff_t end = min_t(loff_t, i_size,
                                                   iocb->ki_pos + len);
                                end = min_t(loff_t, end, PAGE_SIZE);
                                if (statret < end)
                                        zero_user_segment(page, statret, end);
                                ret = copy_page_to_iter(page,
                                                iocb->ki_pos & ~PAGE_MASK,
                                                end - iocb->ki_pos, to);
                                iocb->ki_pos += ret;
                                read += ret;
                        }
                        if (iocb->ki_pos < i_size && read < len) {
                                size_t zlen = min_t(size_t, len - read,
                                                    i_size - iocb->ki_pos);
                                ret = iov_iter_zero(zlen, to);
                                iocb->ki_pos += ret;
                                read += ret;
                        }
                        __free_pages(page, 0);
                        return read;
                }

                /* hit EOF or hole? */
                if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
                    ret < len) {
                        dout("sync_read hit hole, ppos %lld < size %lld"
                             ", reading more\n", iocb->ki_pos, i_size);

                        read += ret;
                        len -= ret;
                        retry_op = HAVE_RETRIED;
                        goto again;
                }
        }

        if (ret >= 0)
                ret += read;

        return ret;
}
/*
 * Take cap references to avoid releasing caps to MDS mid-write.
 *
 * If we are synchronous, and write with an old snap context, the OSD
 * may return EOLDSNAPC.  In that case, retry the write.. _after_
 * dropping our cap refs and allowing the pending snap to logically
 * complete _before_ this write occurs.
 *
 * If we are near ENOSPC, write synchronously.
 */
static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct file *file = iocb->ki_filp;
        struct ceph_file_info *fi = file->private_data;
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_osd_client *osdc =
                &ceph_sb_to_client(inode->i_sb)->client->osdc;
        struct ceph_cap_flush *prealloc_cf;
        ssize_t count, written = 0;
        int err, want, got;
        loff_t pos;

        if (ceph_snap(inode) != CEPH_NOSNAP)
                return -EROFS;

        prealloc_cf = ceph_alloc_cap_flush();
        if (!prealloc_cf)
                return -ENOMEM;

        inode_lock(inode);

        /* We can write back this queue in page reclaim */
        current->backing_dev_info = inode_to_bdi(inode);

        if (iocb->ki_flags & IOCB_APPEND) {
                err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
                if (err < 0)
                        goto out;
        }

        err = generic_write_checks(iocb, from);
        if (err <= 0)
                goto out;

        pos = iocb->ki_pos;
        count = iov_iter_count(from);
        err = file_remove_privs(file);
        if (err)
                goto out;

        err = file_update_time(file);
        if (err)
                goto out;

        if (ci->i_inline_version != CEPH_INLINE_NONE) {
                err = ceph_uninline_data(file, NULL);
                if (err < 0)
                        goto out;
        }

retry_snap:
        if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL)) {
                err = -ENOSPC;
                goto out;
        }

        dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
             inode, ceph_vinop(inode), pos, count, i_size_read(inode));
        if (fi->fmode & CEPH_FILE_MODE_LAZY)
                want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
        else
                want = CEPH_CAP_FILE_BUFFER;
        got = 0;
        err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, pos + count,
                            &got, NULL);
        if (err < 0)
                goto out;

        dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
             inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));

        if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
            (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC)) {
                struct ceph_snap_context *snapc;
                struct iov_iter data;
                inode_unlock(inode);

                spin_lock(&ci->i_ceph_lock);
                if (__ceph_have_pending_cap_snap(ci)) {
                        struct ceph_cap_snap *capsnap =
                                list_last_entry(&ci->i_cap_snaps,
                                                struct ceph_cap_snap,
                                                ci_item);
                        snapc = ceph_get_snap_context(capsnap->context);
                } else {
                        BUG_ON(!ci->i_head_snapc);
                        snapc = ceph_get_snap_context(ci->i_head_snapc);
                }
                spin_unlock(&ci->i_ceph_lock);

                /* we might need to revert back to that point */
                data = *from;
                if (iocb->ki_flags & IOCB_DIRECT)
                        written = ceph_direct_read_write(iocb, &data, snapc,
                                                         &prealloc_cf);
                else
                        written = ceph_sync_write(iocb, &data, pos, snapc);
                if (written == -EOLDSNAPC) {
                        dout("aio_write %p %llx.%llx %llu~%u "
                             "got EOLDSNAPC, retrying\n",
                             inode, ceph_vinop(inode),
                             pos, (unsigned)count);
                        inode_lock(inode);
                        goto retry_snap;
                }
                if (written > 0)
                        iov_iter_advance(from, written);
                ceph_put_snap_context(snapc);
        } else {
                loff_t old_size = i_size_read(inode);
                /*
                 * No need to acquire the i_truncate_mutex. Because
                 * the MDS revokes Fwb caps before sending truncate
                 * message to us. We can't get Fwb cap while there
                 * are pending vmtruncate. So write and vmtruncate
                 * can not run at the same time
                 */
                written = generic_perform_write(file, from, pos);
                if (likely(written >= 0))
                        iocb->ki_pos = pos + written;
                if (i_size_read(inode) > old_size)
                        ceph_fscache_update_objectsize(inode);
                inode_unlock(inode);
        }

        if (written >= 0) {
                int dirty;
                spin_lock(&ci->i_ceph_lock);
                ci->i_inline_version = CEPH_INLINE_NONE;
                dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
                                               &prealloc_cf);
                spin_unlock(&ci->i_ceph_lock);
                if (dirty)
                        __mark_inode_dirty(inode, dirty);
        }

        dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
             inode, ceph_vinop(inode), pos, (unsigned)count,
             ceph_cap_string(got));
        ceph_put_cap_refs(ci, got);

        if (written >= 0) {
                if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL))
                        iocb->ki_flags |= IOCB_DSYNC;

                written = generic_write_sync(iocb, written);
        }

        goto out_unlocked;

out:
        inode_unlock(inode);
out_unlocked:
        ceph_free_cap_flush(prealloc_cf);
        current->backing_dev_info = NULL;
        return written ? written : err;
}
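/*
 * EOLDSNAPC example (illustrative): if a snapshot was taken after caps
 * were obtained but before the OSD processed the write, the OSD
 * rejects the stale snap context with -EOLDSNAPC; the code above jumps
 * back to retry_snap and re-issues the write against the new context,
 * as described in the header comment.
 */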
/*
 * llseek.  be sure to verify file size on SEEK_END.
 */
static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
{
        struct inode *inode = file->f_mapping->host;
        loff_t i_size;
        loff_t ret;

        inode_lock(inode);

        if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
                ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
                if (ret < 0) {
                        offset = ret;
                        goto out;
                }
        }

        i_size = i_size_read(inode);
        switch (whence) {
        case SEEK_END:
                offset += i_size;
                break;
        case SEEK_CUR:
                /*
                 * Here we special-case the lseek(fd, 0, SEEK_CUR)
                 * position-querying operation.  Avoid rewriting the "same"
                 * f_pos value back to the file because a concurrent read(),
                 * write() or lseek() might have altered it
                 */
                if (offset == 0) {
                        offset = file->f_pos;
                        goto out;
                }
                offset += file->f_pos;
                break;
        case SEEK_DATA:
                if (offset >= i_size) {
                        offset = -ENXIO;
                        goto out;
                }
                break;
        case SEEK_HOLE:
                if (offset >= i_size) {
                        offset = -ENXIO;
                        goto out;
                }
                offset = i_size;
                break;
        }

        offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);

out:
        inode_unlock(inode);
        return offset;
}
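/*
 * For illustration: lseek(fd, 0, SEEK_END) triggers a getattr so the
 * size reflects writes from other clients, whereas lseek(fd, 0,
 * SEEK_CUR) short-circuits and simply reports f_pos without rewriting
 * it (see the comment above).
 */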
static inline void ceph_zero_partial_page(
        struct inode *inode, loff_t offset, unsigned size)
{
        struct page *page;
        pgoff_t index = offset >> PAGE_SHIFT;

        page = find_lock_page(inode->i_mapping, index);
        if (page) {
                wait_on_page_writeback(page);
                zero_user(page, offset & (PAGE_SIZE - 1), size);
                unlock_page(page);
                put_page(page);
        }
}

static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
                                      loff_t length)
{
        loff_t nearly = round_up(offset, PAGE_SIZE);
        if (offset < nearly) {
                loff_t size = nearly - offset;
                if (length < size)
                        size = length;
                ceph_zero_partial_page(inode, offset, size);
                offset += size;
                length -= size;
        }
        if (length >= PAGE_SIZE) {
                loff_t size = round_down(length, PAGE_SIZE);
                truncate_pagecache_range(inode, offset, offset + size - 1);
                offset += size;
                length -= size;
        }
        if (length)
                ceph_zero_partial_page(inode, offset, length);
}
static int ceph_zero_partial_object(struct inode *inode,
                                    loff_t offset, loff_t *length)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_osd_request *req;
        int ret = 0;
        loff_t zero = 0;
        int op;

        if (!length) {
                op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
                length = &zero;
        } else {
                op = CEPH_OSD_OP_ZERO;
        }

        req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
                                    ceph_vino(inode),
                                    offset, length,
                                    0, 1, op,
                                    CEPH_OSD_FLAG_WRITE |
                                    CEPH_OSD_FLAG_ONDISK,
                                    NULL, 0, 0, false);
        if (IS_ERR(req)) {
                ret = PTR_ERR(req);
                goto out;
        }

        req->r_mtime = inode->i_mtime;
        ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
        if (!ret) {
                ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
                if (ret == -ENOENT)
                        ret = 0;
        }
        ceph_osdc_put_request(req);

out:
        return ret;
}
static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
{
        int ret = 0;
        struct ceph_inode_info *ci = ceph_inode(inode);
        s32 stripe_unit = ceph_file_layout_su(ci->i_layout);
        s32 stripe_count = ceph_file_layout_stripe_count(ci->i_layout);
        s32 object_size = ceph_file_layout_object_size(ci->i_layout);
        u64 object_set_size = object_size * stripe_count;
        u64 nearly, t;

        /* round offset up to next period boundary */
        nearly = offset + object_set_size - 1;
        t = nearly;
        nearly -= do_div(t, object_set_size);

        while (length && offset < nearly) {
                loff_t size = length;
                ret = ceph_zero_partial_object(inode, offset, &size);
                if (ret < 0)
                        return ret;
                offset += size;
                length -= size;
        }
        while (length >= object_set_size) {
                int i;
                loff_t pos = offset;
                for (i = 0; i < stripe_count; ++i) {
                        ret = ceph_zero_partial_object(inode, pos, NULL);
                        if (ret < 0)
                                return ret;
                        pos += stripe_unit;
                }
                offset += object_set_size;
                length -= object_set_size;
        }
        while (length) {
                loff_t size = length;
                ret = ceph_zero_partial_object(inode, offset, &size);
                if (ret < 0)
                        return ret;
                offset += size;
                length -= size;
        }
        return ret;
}
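/*
 * Period math example (illustrative layout: stripe_unit 1M,
 * stripe_count 4, object_size 4M): object_set_size is 16M, so a 26M
 * hole starting at offset 8M is zeroed in three stages: partial
 * objects up to the 16M period boundary, one whole 16M object set via
 * per-object DELETE/TRUNCATE, and a final 2M tail with
 * CEPH_OSD_OP_ZERO.
 */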
static long ceph_fallocate(struct file *file, int mode,
                           loff_t offset, loff_t length)
{
        struct ceph_file_info *fi = file->private_data;
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_osd_client *osdc =
                &ceph_inode_to_client(inode)->client->osdc;
        struct ceph_cap_flush *prealloc_cf;
        int want, got = 0;
        int dirty;
        int ret = 0;
        loff_t endoff = 0;
        loff_t size;

        if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
                return -EOPNOTSUPP;

        if (!S_ISREG(inode->i_mode))
                return -EOPNOTSUPP;

        prealloc_cf = ceph_alloc_cap_flush();
        if (!prealloc_cf)
                return -ENOMEM;

        inode_lock(inode);

        if (ceph_snap(inode) != CEPH_NOSNAP) {
                ret = -EROFS;
                goto unlock;
        }

        if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) &&
            !(mode & FALLOC_FL_PUNCH_HOLE)) {
                ret = -ENOSPC;
                goto unlock;
        }

        if (ci->i_inline_version != CEPH_INLINE_NONE) {
                ret = ceph_uninline_data(file, NULL);
                if (ret < 0)
                        goto unlock;
        }

        size = i_size_read(inode);
        if (!(mode & FALLOC_FL_KEEP_SIZE))
                endoff = offset + length;

        if (fi->fmode & CEPH_FILE_MODE_LAZY)
                want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
        else
                want = CEPH_CAP_FILE_BUFFER;

        ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, endoff, &got, NULL);
        if (ret < 0)
                goto unlock;

        if (mode & FALLOC_FL_PUNCH_HOLE) {
                if (offset < size)
                        ceph_zero_pagecache_range(inode, offset, length);
                ret = ceph_zero_objects(inode, offset, length);
        } else if (endoff > size) {
                truncate_pagecache_range(inode, size, -1);
                if (ceph_inode_set_size(inode, endoff))
                        ceph_check_caps(ceph_inode(inode),
                                        CHECK_CAPS_AUTHONLY, NULL);
        }

        if (!ret) {
                spin_lock(&ci->i_ceph_lock);
                ci->i_inline_version = CEPH_INLINE_NONE;
                dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
                                               &prealloc_cf);
                spin_unlock(&ci->i_ceph_lock);
                if (dirty)
                        __mark_inode_dirty(inode, dirty);
        }

        ceph_put_cap_refs(ci, got);
unlock:
        inode_unlock(inode);
        ceph_free_cap_flush(prealloc_cf);
        return ret;
}
const struct file_operations ceph_file_fops = {
        .open = ceph_open,
        .release = ceph_release,
        .llseek = ceph_llseek,
        .read_iter = ceph_read_iter,
        .write_iter = ceph_write_iter,
        .mmap = ceph_mmap,
        .fsync = ceph_fsync,
        .lock = ceph_lock,
        .flock = ceph_flock,
        .splice_read = generic_file_splice_read,
        .splice_write = iter_file_splice_write,
        .unlocked_ioctl = ceph_ioctl,
        .compat_ioctl = ceph_ioctl,
        .fallocate = ceph_fallocate,
};