/*
   rbd.c -- Export ceph rados objects as a Linux block device

   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

   For usage instructions, please refer to:

		 Documentation/ABI/testing/sysfs-bus-rbd

 */
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/cls_lock_client.h>
#include <linux/ceph/striper.h>
#include <linux/ceph/decode.h>
#include <linux/fs_parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/workqueue.h>

#include "rbd_types.h"
#define RBD_DEBUG	/* Activate rbd_assert() calls */

/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value returns
 * -EINVAL without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
	unsigned int counter;

	counter = (unsigned int)atomic_fetch_add_unless(v, 1, 0);
	if (counter <= (unsigned int)INT_MAX)
		return (int)counter;

	atomic_dec(v);

	return -EINVAL;
}

/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
	int counter;

	counter = atomic_dec_return(v);
	if (counter >= 0)
		return counter;

	atomic_inc(v);

	return -EINVAL;
}
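/*
 * Usage sketch (illustrative only, not part of the original file):
 * these helpers back rbd_dev->parent_ref further down.  A counter
 * parked at 0 stays at 0 (the image was flattened and the parent is
 * gone), a positive counter is bumped normally, and a negative return
 * signals over-/underflow:
 *
 *	if (atomic_inc_return_safe(&rbd_dev->parent_ref) > 0)
 *		;	// got a parent reference
 *	...
 *	if (atomic_dec_return_safe(&rbd_dev->parent_ref) < 0)
 *		rbd_warn(rbd_dev, "parent reference underflow");
 */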
#define RBD_DRV_NAME "rbd"

#define RBD_MINORS_PER_MAJOR		256
#define RBD_SINGLE_MAJOR_PART_SHIFT	4

#define RBD_MAX_PARENT_CHAIN_LEN	16

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN \
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

#define	BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64

#define RBD_NOTIFY_TIMEOUT	5	/* seconds */
#define RBD_RETRY_DELAY		msecs_to_jiffies(1000)
/* Feature bits */

#define RBD_FEATURE_LAYERING		(1ULL<<0)
#define RBD_FEATURE_STRIPINGV2		(1ULL<<1)
#define RBD_FEATURE_EXCLUSIVE_LOCK	(1ULL<<2)
#define RBD_FEATURE_OBJECT_MAP		(1ULL<<3)
#define RBD_FEATURE_FAST_DIFF		(1ULL<<4)
#define RBD_FEATURE_DEEP_FLATTEN	(1ULL<<5)
#define RBD_FEATURE_DATA_POOL		(1ULL<<7)
#define RBD_FEATURE_OPERATIONS		(1ULL<<8)

#define RBD_FEATURES_ALL	(RBD_FEATURE_LAYERING |		\
				 RBD_FEATURE_STRIPINGV2 |	\
				 RBD_FEATURE_EXCLUSIVE_LOCK |	\
				 RBD_FEATURE_OBJECT_MAP |	\
				 RBD_FEATURE_FAST_DIFF |	\
				 RBD_FEATURE_DEEP_FLATTEN |	\
				 RBD_FEATURE_DATA_POOL |	\
				 RBD_FEATURE_OPERATIONS)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)
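/*
 * Illustrative sketch (not part of the original excerpt): image
 * probing refuses to map images that use feature bits outside
 * RBD_FEATURES_SUPPORTED, along the lines of:
 *
 *	u64 unsup = features & ~RBD_FEATURES_SUPPORTED;
 *
 *	if (unsup) {
 *		rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
 *			 unsup);
 *		return -ENXIO;
 *	}
 */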
/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 */
#define DEV_NAME_LEN		32
/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These six fields never change for a given rbd image */
	char *object_prefix;
	__u8 obj_order;
	u64 stripe_unit;
	u64 stripe_count;
	s64 data_pool_id;
	u64 features;		/* Might be changeable someday? */

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
};
/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;
	const char	*pool_ns;	/* NULL if default, never "" */

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};
/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};

struct pending_result {
	int			result;		/* first nonzero result */
	int			num_pending;
};
struct rbd_img_request;

enum obj_request_type {
	OBJ_REQUEST_NODATA = 1,
	OBJ_REQUEST_BIO,	/* pointer into provided bio (list) */
	OBJ_REQUEST_BVECS,	/* pointer into provided bio_vec array */
	OBJ_REQUEST_OWN_BVECS,	/* private bio_vec array, doesn't own pages */
};

enum obj_operation_type {
	OBJ_OP_READ = 1,
	OBJ_OP_WRITE,
	OBJ_OP_DISCARD,
	OBJ_OP_ZEROOUT,
};

#define RBD_OBJ_FLAG_DELETION			(1U << 0)
#define RBD_OBJ_FLAG_COPYUP_ENABLED		(1U << 1)
#define RBD_OBJ_FLAG_COPYUP_ZEROS		(1U << 2)
#define RBD_OBJ_FLAG_MAY_EXIST			(1U << 3)
#define RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT	(1U << 4)
enum rbd_obj_read_state {
	RBD_OBJ_READ_START = 1,
	RBD_OBJ_READ_OBJECT,
	RBD_OBJ_READ_PARENT,
};
/*
 * Writes go through the following state machine to deal with
 * layering:
 *
 * A guarded write starts in RBD_OBJ_WRITE_GUARD.  If the object turns
 * out not to exist, the data is read from the parent
 * (RBD_OBJ_WRITE_READ_FROM_PARENT) and written back with a copyup
 * (RBD_OBJ_WRITE_COPYUP_EMPTY_SNAPC, then RBD_OBJ_WRITE_COPYUP_OPS).
 * The empty-snapc step is skipped when a deep-copyup is not needed,
 * and the whole detour is skipped when the image is flattened or no
 * copyup is needed.  All paths end in "done".
 *
 * Writes start in RBD_OBJ_WRITE_GUARD or _FLAT, depending on whether
 * assert_exists guard is needed or not (in some cases it's not needed
 * even if there is a parent).
 */
enum rbd_obj_write_state {
	RBD_OBJ_WRITE_START = 1,
	RBD_OBJ_WRITE_PRE_OBJECT_MAP,
	RBD_OBJ_WRITE_OBJECT,
	__RBD_OBJ_WRITE_COPYUP,
	RBD_OBJ_WRITE_COPYUP,
	RBD_OBJ_WRITE_POST_OBJECT_MAP,
};

enum rbd_obj_copyup_state {
	RBD_OBJ_COPYUP_START = 1,
	RBD_OBJ_COPYUP_READ_PARENT,
	__RBD_OBJ_COPYUP_OBJECT_MAPS,
	RBD_OBJ_COPYUP_OBJECT_MAPS,
	__RBD_OBJ_COPYUP_WRITE_OBJECT,
	RBD_OBJ_COPYUP_WRITE_OBJECT,
};
struct rbd_obj_request {
	struct ceph_object_extent ex;
	unsigned int		flags;	/* RBD_OBJ_FLAG_* */
	union {
		enum rbd_obj_read_state	 read_state;	/* for reads */
		enum rbd_obj_write_state write_state;	/* for writes */
	};

	struct rbd_img_request	*img_request;
	struct ceph_file_extent	*img_extents;
	u32			num_img_extents;

	union {
		struct ceph_bio_iter	bio_pos;
		struct {
			struct ceph_bvec_iter	bvec_pos;
			u32			bvec_count;
			u32			bvec_idx;
		};
	};

	enum rbd_obj_copyup_state copyup_state;
	struct bio_vec		*copyup_bvecs;
	u32			copyup_bvec_count;

	struct list_head	osd_reqs;	/* w/ r_private_item */

	struct mutex		state_mutex;
	struct pending_result	pending;
	struct kref		kref;
};
enum img_req_flags {
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
};

enum rbd_img_state {
	RBD_IMG_START = 1,
	RBD_IMG_EXCLUSIVE_LOCK,
	__RBD_IMG_OBJECT_REQUESTS,
	RBD_IMG_OBJECT_REQUESTS,
};
struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	enum obj_operation_type	op_type;
	enum obj_request_type	data_type;
	unsigned long		flags;
	enum rbd_img_state	state;
	union {
		u64			snap_id;	/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	union {
		struct request		*rq;		/* block request */
		struct rbd_obj_request	*obj_request;	/* obj req initiator */
	};

	struct list_head	lock_item;
	struct list_head	object_extents;	/* obj_req.ex structs */

	struct mutex		state_mutex;
	struct pending_result	pending;
	struct work_struct	work;
	int			work_result;
	struct kref		kref;
};
#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->object_extents, ex.oe_item)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe(oreq, n, &(ireq)->object_extents, ex.oe_item)
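/*
 * Usage sketch (illustrative only): walk an image request's object
 * requests, using the _safe variant when entries may be removed while
 * iterating:
 *
 *	struct rbd_obj_request *obj_req, *next_obj_req;
 *
 *	for_each_obj_request_safe(img_req, obj_req, next_obj_req)
 *		rbd_img_obj_request_del(img_req, obj_req);
 */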
enum rbd_watch_state {
	RBD_WATCH_STATE_UNREGISTERED,
	RBD_WATCH_STATE_REGISTERED,
	RBD_WATCH_STATE_ERROR,
};

enum rbd_lock_state {
	RBD_LOCK_STATE_UNLOCKED,
	RBD_LOCK_STATE_LOCKED,
	RBD_LOCK_STATE_RELEASING,
};

/* WatchNotify::ClientId */
struct rbd_client_id {
	u64 gid;
	u64 handle;
};

struct rbd_mapping {
	u64			size;
};
/*
 * a single device
 */
struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	int			minor;
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;
	struct rbd_options	*opts;
	char			*config_info;	/* add{,_single_major} string */

	struct ceph_object_id	header_oid;
	struct ceph_object_locator header_oloc;

	struct ceph_file_layout	layout;		/* used for all rbd requests */

	struct mutex		watch_mutex;
	enum rbd_watch_state	watch_state;
	struct ceph_osd_linger_request *watch_handle;
	u64			watch_cookie;
	struct delayed_work	watch_dwork;

	struct rw_semaphore	lock_rwsem;
	enum rbd_lock_state	lock_state;
	char			lock_cookie[32];
	struct rbd_client_id	owner_cid;
	struct work_struct	acquired_lock_work;
	struct work_struct	released_lock_work;
	struct delayed_work	lock_dwork;
	struct work_struct	unlock_work;
	spinlock_t		lock_lists_lock;
	struct list_head	acquiring_list;
	struct list_head	running_list;
	struct completion	acquire_wait;
	int			acquire_err;
	struct completion	releasing_wait;

	spinlock_t		object_map_lock;
	u8			*object_map;
	u64			object_map_size;	/* in objects */
	u64			object_map_flags;

	struct workqueue_struct	*task_wq;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	atomic_t		parent_ref;
	struct rbd_device	*parent;

	/* Block layer tags. */
	struct blk_mq_tag_set	tag_set;

	/* protects updating the header */
	struct rw_semaphore	header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* sysfs related */
	struct device		dev;
	unsigned long		open_count;	/* protected by lock */
};
/*
 * Flag bits for rbd_dev->flags:
 * - REMOVING (which is coupled with rbd_dev->open_count) is protected
 *   by rbd_dev->lock
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* rbd_dev_device_setup() ran */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
	RBD_DEV_FLAG_READONLY,	/* -o ro or snapshot */
};
static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);		/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;

static int rbd_major;
static DEFINE_IDA(rbd_dev_id_ida);

static struct workqueue_struct *rbd_wq;

static struct ceph_snap_context rbd_empty_snapc = {
	.nref = REFCOUNT_INIT(1),
};
/*
 * single-major requires >= 0.75 version of userspace rbd utility.
 */
static bool single_major = true;
module_param(single_major, bool, 0444);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: true)");
static ssize_t add_store(struct bus_type *bus, const char *buf, size_t count);
static ssize_t remove_store(struct bus_type *bus, const char *buf,
			    size_t count);
static ssize_t add_single_major_store(struct bus_type *bus, const char *buf,
				      size_t count);
static ssize_t remove_single_major_store(struct bus_type *bus, const char *buf,
					 size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);
static int rbd_dev_id_to_minor(int dev_id)
{
	return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
}

static int minor_to_rbd_dev_id(int minor)
{
	return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
}
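/*
 * Worked example (illustrative only): with RBD_SINGLE_MAJOR_PART_SHIFT
 * of 4, each device owns 16 minors -- dev_id 3 maps to minor 48, and
 * minors 48..63 (the whole device plus its partitions) all map back
 * to dev_id 3.
 */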
static bool rbd_is_ro(struct rbd_device *rbd_dev)
{
	return test_bit(RBD_DEV_FLAG_READONLY, &rbd_dev->flags);
}

static bool rbd_is_snap(struct rbd_device *rbd_dev)
{
	return rbd_dev->spec->snap_id != CEPH_NOSNAP;
}

static bool __rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
	lockdep_assert_held(&rbd_dev->lock_rwsem);

	return rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED ||
	       rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING;
}

static bool rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
	bool is_lock_owner;

	down_read(&rbd_dev->lock_rwsem);
	is_lock_owner = __rbd_is_lock_owner(rbd_dev);
	up_read(&rbd_dev->lock_rwsem);
	return is_lock_owner;
}
static ssize_t supported_features_show(struct bus_type *bus, char *buf)
{
	return sprintf(buf, "0x%llx\n", RBD_FEATURES_SUPPORTED);
}

static BUS_ATTR_WO(add);
static BUS_ATTR_WO(remove);
static BUS_ATTR_WO(add_single_major);
static BUS_ATTR_WO(remove_single_major);
static BUS_ATTR_RO(supported_features);
static struct attribute *rbd_bus_attrs[] = {
	&bus_attr_add.attr,
	&bus_attr_remove.attr,
	&bus_attr_add_single_major.attr,
	&bus_attr_remove_single_major.attr,
	&bus_attr_supported_features.attr,
	NULL,
};

static umode_t rbd_bus_is_visible(struct kobject *kobj,
				  struct attribute *attr, int index)
{
	if (!single_major &&
	    (attr == &bus_attr_add_single_major.attr ||
	     attr == &bus_attr_remove_single_major.attr))
		return 0;

	return attr->mode;
}

static const struct attribute_group rbd_bus_group = {
	.attrs = rbd_bus_attrs,
	.is_visible = rbd_bus_is_visible,
};
__ATTRIBUTE_GROUPS(rbd_bus);

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_groups	= rbd_bus_groups,
};
static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =    "rbd",
	.release =      rbd_root_dev_release,
};
static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}
#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_header_info(struct rbd_device *rbd_dev);
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u8 *order, u64 *snap_size);
static int rbd_dev_v2_get_flags(struct rbd_device *rbd_dev);

static void rbd_obj_handle_request(struct rbd_obj_request *obj_req, int result);
static void rbd_img_handle_request(struct rbd_img_request *img_req, int result);
/*
 * Return true if nothing else is pending.
 */
static bool pending_result_dec(struct pending_result *pending, int *result)
{
	rbd_assert(pending->num_pending > 0);

	if (*result && !pending->result)
		pending->result = *result;
	if (--pending->num_pending)
		return false;

	*result = pending->result;
	return true;
}
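/*
 * Usage sketch (illustrative only): a state machine arms
 * pending->num_pending before issuing several sub-requests; each
 * completion then funnels through pending_result_dec(), and only the
 * final one advances the state machine, carrying the first nonzero
 * result:
 *
 *	if (!pending_result_dec(&obj_req->pending, &result))
 *		return;		// still waiting for other sub-requests
 *	// all sub-requests done, act on result
 */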
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	(void) get_device(&rbd_dev->dev);

	return 0;
}
static void rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	put_device(&rbd_dev->dev);
}
static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
{
	int ro;

	if (get_user(ro, (int __user *)arg))
		return -EFAULT;

	/*
	 * Both images mapped read-only and snapshots can't be marked
	 * read-write.
	 */
	if (!ro) {
		if (rbd_is_ro(rbd_dev))
			return -EROFS;

		rbd_assert(!rbd_is_snap(rbd_dev));
	}

	/* Let blkdev_roset() handle it */
	return -ENOTTY;
}
static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	int ret;

	switch (cmd) {
	case BLKROSET:
		ret = rbd_ioctl_set_ro(rbd_dev, arg);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
				unsigned int cmd, unsigned long arg)
{
	return rbd_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */

static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
	.ioctl			= rbd_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= rbd_compat_ioctl,
#endif
};
/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.  Caller holds client_mutex.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	rbdc->client = ceph_create_client(ceph_opts, rbdc);
	if (IS_ERR(rbdc->client))
		goto out_rbdc;
	ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_client;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;
out_client:
	ceph_destroy_client(rbdc->client);
out_rbdc:
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}
static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}
/*
 * (Per device) rbd map options
 */
enum {
	Opt_queue_depth,
	Opt_alloc_size,
	Opt_lock_timeout,
	/* int args above */
	Opt_pool_ns,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	Opt_lock_on_read,
	Opt_exclusive,
	Opt_notrim,
};

static const struct fs_parameter_spec rbd_parameters[] = {
	fsparam_u32	("alloc_size",			Opt_alloc_size),
	fsparam_flag	("exclusive",			Opt_exclusive),
	fsparam_flag	("lock_on_read",		Opt_lock_on_read),
	fsparam_u32	("lock_timeout",		Opt_lock_timeout),
	fsparam_flag	("notrim",			Opt_notrim),
	fsparam_string	("_pool_ns",			Opt_pool_ns),
	fsparam_u32	("queue_depth",			Opt_queue_depth),
	fsparam_flag	("read_only",			Opt_read_only),
	fsparam_flag	("read_write",			Opt_read_write),
	fsparam_flag	("ro",				Opt_read_only),
	fsparam_flag	("rw",				Opt_read_write),
	{}
};

struct rbd_options {
	int	queue_depth;
	int	alloc_size;
	unsigned long	lock_timeout;
	bool	read_only;
	bool	lock_on_read;
	bool	exclusive;
	bool	trim;
};

#define RBD_QUEUE_DEPTH_DEFAULT	BLKDEV_MAX_RQ
#define RBD_ALLOC_SIZE_DEFAULT	(64 * 1024)
#define RBD_LOCK_TIMEOUT_DEFAULT 0  /* no timeout */
#define RBD_READ_ONLY_DEFAULT	false
#define RBD_LOCK_ON_READ_DEFAULT false
#define RBD_EXCLUSIVE_DEFAULT	false
#define RBD_TRIM_DEFAULT	true

struct rbd_parse_opts_ctx {
	struct rbd_spec		*spec;
	struct ceph_options	*copts;
	struct rbd_options	*opts;
};
static char *obj_op_name(enum obj_operation_type op_type)
{
	switch (op_type) {
	case OBJ_OP_READ:
		return "read";
	case OBJ_OP_WRITE:
		return "write";
	case OBJ_OP_DISCARD:
		return "discard";
	case OBJ_OP_ZEROOUT:
		return "zeroout";
	default:
		return "???";
	}
}
/*
 * Destroy ceph client
 *
 * Takes and releases rbd_client_list_lock itself; the caller must not
 * hold it.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}
/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret;

	mutex_lock(&client_mutex);
	rbdc = rbd_client_find(ceph_opts);
	if (rbdc) {
		ceph_destroy_options(ceph_opts);

		/*
		 * Using an existing client.  Make sure ->pg_pools is up to
		 * date before we look up the pool id in do_rbd_add().
		 */
		ret = ceph_wait_for_latest_osdmap(rbdc->client,
					rbdc->client->options->mount_timeout);
		if (ret) {
			rbd_warn(NULL, "failed to get latest osdmap: %d", ret);
			rbd_put_client(rbdc);
			rbdc = ERR_PTR(ret);
		}
	} else {
		rbdc = rbd_client_create(ceph_opts);
	}
	mutex_unlock(&client_mutex);

	return rbdc;
}
static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}

static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}
/*
 * returns the size of an object in the image
 */
static u32 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1U << header->obj_order;
}

static void rbd_init_layout(struct rbd_device *rbd_dev)
{
	if (rbd_dev->header.stripe_unit == 0 ||
	    rbd_dev->header.stripe_count == 0) {
		rbd_dev->header.stripe_unit = rbd_obj_bytes(&rbd_dev->header);
		rbd_dev->header.stripe_count = 1;
	}

	rbd_dev->layout.stripe_unit = rbd_dev->header.stripe_unit;
	rbd_dev->layout.stripe_count = rbd_dev->header.stripe_count;
	rbd_dev->layout.object_size = rbd_obj_bytes(&rbd_dev->header);
	rbd_dev->layout.pool_id = rbd_dev->header.data_pool_id == CEPH_NOPOOL ?
			  rbd_dev->spec->pool_id : rbd_dev->header.data_pool_id;
	RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);
}
/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
				 struct rbd_image_header_ondisk *ondisk)
{
	struct rbd_image_header *header = &rbd_dev->header;
	bool first_time = header->object_prefix == NULL;
	struct ceph_snap_context *snapc;
	char *object_prefix = NULL;
	char *snap_names = NULL;
	u64 *snap_sizes = NULL;
	u32 snap_count;
	int ret = -ENOMEM;
	u32 i;

	/* Allocate this now to avoid having to handle failure below */

	if (first_time) {
		object_prefix = kstrndup(ondisk->object_prefix,
					 sizeof(ondisk->object_prefix),
					 GFP_KERNEL);
		if (!object_prefix)
			return -ENOMEM;
	}

	/* Allocate the snapshot context and fill it in */

	snap_count = le32_to_cpu(ondisk->snap_count);
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc)
		goto out_err;
	snapc->seq = le64_to_cpu(ondisk->snap_seq);
	if (snap_count) {
		struct rbd_image_snap_ondisk *snaps;
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* We'll keep a copy of the snapshot names... */

		if (snap_names_len > (u64)SIZE_MAX)
			goto out_2big;
		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!snap_names)
			goto out_err;

		/* ...as well as the array of their sizes. */
		snap_sizes = kmalloc_array(snap_count,
					   sizeof(*header->snap_sizes),
					   GFP_KERNEL);
		if (!snap_sizes)
			goto out_err;

		/*
		 * Copy the names, and fill in each snapshot's id
		 * and size.
		 *
		 * Note that rbd_dev_v1_header_info() guarantees the
		 * ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, this memcpy() is safe.
		 */
		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
		snaps = ondisk->snaps;
		for (i = 0; i < snap_count; i++) {
			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
		}
	}

	/* We won't fail any more, fill in the header */

	if (first_time) {
		header->object_prefix = object_prefix;
		header->obj_order = ondisk->options.order;
		rbd_init_layout(rbd_dev);
	} else {
		ceph_put_snap_context(header->snapc);
		kfree(header->snap_names);
		kfree(header->snap_sizes);
	}

	/* The remaining fields always get updated (when we refresh) */

	header->image_size = le64_to_cpu(ondisk->image_size);
	header->snapc = snapc;
	header->snap_names = snap_names;
	header->snap_sizes = snap_sizes;

	return 0;
out_2big:
	ret = -EIO;
out_err:
	kfree(snap_sizes);
	kfree(snap_names);
	ceph_put_snap_context(snapc);
	kfree(object_prefix);

	return ret;
}
static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}
/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)
		return 1;
	return snap_id1 == snap_id2 ? 0 : -1;
}
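/*
 * Worked example (illustrative only): for a snapc->snaps array of
 * { 40, 30, 10 } (descending), snapid_compare_reverse() with key 30
 * and element 40 returns 1 ("keep searching to the right"), so
 * bsearch() treats the reversed array as sorted and finds 30 at
 * index 1.
 */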
/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 *found;

	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);

	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}
static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	u32 which;
	const char *snap_name;

	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)
		return ERR_PTR(-ENOENT);

	snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
	return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}

static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}
static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_size)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;

		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;

		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
}

static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	u64 snap_id = rbd_dev->spec->snap_id;
	u64 size = 0;
	int ret;

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
}
static void zero_bvec(struct bio_vec *bv)
{
	void *buf;
	unsigned long flags;

	buf = bvec_kmap_irq(bv, &flags);
	memset(buf, 0, bv->bv_len);
	flush_dcache_page(bv->bv_page);
	bvec_kunmap_irq(buf, &flags);
}

static void zero_bios(struct ceph_bio_iter *bio_pos, u32 off, u32 bytes)
{
	struct ceph_bio_iter it = *bio_pos;

	ceph_bio_iter_advance(&it, off);
	ceph_bio_iter_advance_step(&it, bytes, ({
		zero_bvec(&bv);
	}));
}

static void zero_bvecs(struct ceph_bvec_iter *bvec_pos, u32 off, u32 bytes)
{
	struct ceph_bvec_iter it = *bvec_pos;

	ceph_bvec_iter_advance(&it, off);
	ceph_bvec_iter_advance_step(&it, bytes, ({
		zero_bvec(&bv);
	}));
}
/*
 * Zero a range in @obj_req data buffer defined by a bio (list) or
 * (private) bio_vec array.
 *
 * @off is relative to the start of the data buffer.
 */
static void rbd_obj_zero_range(struct rbd_obj_request *obj_req, u32 off,
			       u32 bytes)
{
	dout("%s %p data buf %u~%u\n", __func__, obj_req, off, bytes);

	switch (obj_req->img_request->data_type) {
	case OBJ_REQUEST_BIO:
		zero_bios(&obj_req->bio_pos, off, bytes);
		break;
	case OBJ_REQUEST_BVECS:
	case OBJ_REQUEST_OWN_BVECS:
		zero_bvecs(&obj_req->bvec_pos, off, bytes);
		break;
	default:
		BUG();
	}
}
static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		kref_read(&obj_request->kref));
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
	rbd_assert(img_request != NULL);
	dout("%s: img %p (was %d)\n", __func__, img_request,
		kref_read(&img_request->kref));
	kref_put(&img_request->kref, rbd_img_request_destroy);
}

static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->img_request == NULL);

	/* Image request now owns object's original reference */
	obj_request->img_request = img_request;
	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
	list_del(&obj_request->ex.oe_item);
	rbd_assert(obj_request->img_request == img_request);
	rbd_obj_request_put(obj_request);
}
static void rbd_osd_submit(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;

	dout("%s osd_req %p for obj_req %p objno %llu %llu~%llu\n",
	     __func__, osd_req, obj_req, obj_req->ex.oe_objno,
	     obj_req->ex.oe_off, obj_req->ex.oe_len);
	ceph_osdc_start_request(osd_req->r_osdc, osd_req, false);
}
/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_layered_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static void img_request_layered_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}

static bool rbd_obj_is_entire(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;

	return !obj_req->ex.oe_off &&
	       obj_req->ex.oe_len == rbd_dev->layout.object_size;
}

static bool rbd_obj_is_tail(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;

	return obj_req->ex.oe_off + obj_req->ex.oe_len ==
					rbd_dev->layout.object_size;
}
/*
 * Must be called after rbd_obj_calc_img_extents().
 */
static bool rbd_obj_copyup_enabled(struct rbd_obj_request *obj_req)
{
	if (!obj_req->num_img_extents ||
	    (rbd_obj_is_entire(obj_req) &&
	     !obj_req->img_request->snapc->num_snaps))
		return false;

	return true;
}

static u64 rbd_obj_img_extents_bytes(struct rbd_obj_request *obj_req)
{
	return ceph_file_extents_bytes(obj_req->img_extents,
				       obj_req->num_img_extents);
}

static bool rbd_img_is_write(struct rbd_img_request *img_req)
{
	switch (img_req->op_type) {
	case OBJ_OP_READ:
		return false;
	case OBJ_OP_WRITE:
	case OBJ_OP_DISCARD:
	case OBJ_OP_ZEROOUT:
		return true;
	default:
		BUG();
	}
}
static void rbd_osd_req_callback(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;
	int result;

	dout("%s osd_req %p result %d for obj_req %p\n", __func__, osd_req,
	     osd_req->r_result, obj_req);

	/*
	 * Writes aren't allowed to return a data payload.  In some
	 * guarded write cases (e.g. stat + zero on an empty object)
	 * a stat response makes it through, but we don't care.
	 */
	if (osd_req->r_result > 0 && rbd_img_is_write(obj_req->img_request))
		result = 0;
	else
		result = osd_req->r_result;

	rbd_obj_handle_request(obj_req, result);
}
static void rbd_osd_format_read(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_request = osd_req->r_priv;

	osd_req->r_flags = CEPH_OSD_FLAG_READ;
	osd_req->r_snapid = obj_request->img_request->snap_id;
}

static void rbd_osd_format_write(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_request = osd_req->r_priv;

	osd_req->r_flags = CEPH_OSD_FLAG_WRITE;
	ktime_get_real_ts64(&osd_req->r_mtime);
	osd_req->r_data_offset = obj_request->ex.oe_off;
}
static struct ceph_osd_request *
__rbd_obj_add_osd_request(struct rbd_obj_request *obj_req,
			  struct ceph_snap_context *snapc, int num_ops)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct ceph_osd_request *req;
	const char *name_format = rbd_dev->image_format == 1 ?
				      RBD_V1_DATA_FORMAT : RBD_V2_DATA_FORMAT;
	int ret;

	req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false, GFP_NOIO);
	if (!req)
		return ERR_PTR(-ENOMEM);

	list_add_tail(&req->r_private_item, &obj_req->osd_reqs);
	req->r_callback = rbd_osd_req_callback;
	req->r_priv = obj_req;

	/*
	 * Data objects may be stored in a separate pool, but always in
	 * the same namespace in that pool as the header in its pool.
	 */
	ceph_oloc_copy(&req->r_base_oloc, &rbd_dev->header_oloc);
	req->r_base_oloc.pool = rbd_dev->layout.pool_id;

	ret = ceph_oid_aprintf(&req->r_base_oid, GFP_NOIO, name_format,
			       rbd_dev->header.object_prefix,
			       obj_req->ex.oe_objno);
	if (ret)
		return ERR_PTR(ret);

	return req;
}

static struct ceph_osd_request *
rbd_obj_add_osd_request(struct rbd_obj_request *obj_req, int num_ops)
{
	return __rbd_obj_add_osd_request(obj_req, obj_req->img_request->snapc,
					 num_ops);
}
static struct rbd_obj_request *rbd_obj_request_create(void)
{
	struct rbd_obj_request *obj_request;

	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
	if (!obj_request)
		return NULL;

	ceph_object_extent_init(&obj_request->ex);
	INIT_LIST_HEAD(&obj_request->osd_reqs);
	mutex_init(&obj_request->state_mutex);
	kref_init(&obj_request->kref);

	dout("%s %p\n", __func__, obj_request);
	return obj_request;
}
static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;
	struct ceph_osd_request *osd_req;
	u32 i;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	dout("%s: obj %p\n", __func__, obj_request);

	while (!list_empty(&obj_request->osd_reqs)) {
		osd_req = list_first_entry(&obj_request->osd_reqs,
				    struct ceph_osd_request, r_private_item);
		list_del_init(&osd_req->r_private_item);
		ceph_osdc_put_request(osd_req);
	}

	switch (obj_request->img_request->data_type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_BVECS:
		break;		/* Nothing to do */
	case OBJ_REQUEST_OWN_BVECS:
		kfree(obj_request->bvec_pos.bvecs);
		break;
	default:
		BUG();
	}

	kfree(obj_request->img_extents);
	if (obj_request->copyup_bvecs) {
		for (i = 0; i < obj_request->copyup_bvec_count; i++) {
			if (obj_request->copyup_bvecs[i].bv_page)
				__free_page(obj_request->copyup_bvecs[i].bv_page);
		}
		kfree(obj_request->copyup_bvecs);
	}

	kmem_cache_free(rbd_obj_request_cache, obj_request);
}
/* It's OK to call this for a device with no parent */

static void rbd_spec_put(struct rbd_spec *spec);
static void rbd_dev_unparent(struct rbd_device *rbd_dev)
{
	rbd_dev_remove_parent(rbd_dev);
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;
}

/*
 * Parent image reference counting is used to determine when an
 * image's parent fields can be safely torn down--after there are no
 * more in-flight requests to the parent image.  When the last
 * reference is dropped, cleaning them up is safe.
 */
static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return;

	counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
	if (counter > 0)
		return;

	/* Last reference; clean up parent data structures */

	if (!counter)
		rbd_dev_unparent(rbd_dev);
	else
		rbd_warn(rbd_dev, "parent reference underflow");
}

/*
 * If an image has a non-zero parent overlap, get a reference to its
 * parent.
 *
 * Returns true if the rbd device has a parent with a non-zero
 * overlap and a reference for it was successfully taken, or
 * false otherwise.
 */
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
	int counter = 0;

	if (!rbd_dev->parent_spec)
		return false;

	down_read(&rbd_dev->header_rwsem);
	if (rbd_dev->parent_overlap)
		counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
	up_read(&rbd_dev->header_rwsem);

	if (counter < 0)
		rbd_warn(rbd_dev, "parent reference overflow");

	return counter > 0;
}
/*
 * Caller is responsible for filling in the list of object requests
 * that comprises the image request, and the Linux request pointer
 * (if there is one).
 */
static struct rbd_img_request *rbd_img_request_create(
					struct rbd_device *rbd_dev,
					enum obj_operation_type op_type,
					struct ceph_snap_context *snapc)
{
	struct rbd_img_request *img_request;

	img_request = kmem_cache_zalloc(rbd_img_request_cache, GFP_NOIO);
	if (!img_request)
		return NULL;

	img_request->rbd_dev = rbd_dev;
	img_request->op_type = op_type;
	if (!rbd_img_is_write(img_request))
		img_request->snap_id = rbd_dev->spec->snap_id;
	else
		img_request->snapc = snapc;

	if (rbd_dev_parent_get(rbd_dev))
		img_request_layered_set(img_request);

	INIT_LIST_HEAD(&img_request->lock_item);
	INIT_LIST_HEAD(&img_request->object_extents);
	mutex_init(&img_request->state_mutex);
	kref_init(&img_request->kref);

	return img_request;
}
static void rbd_img_request_destroy(struct kref *kref)
{
	struct rbd_img_request *img_request;
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	img_request = container_of(kref, struct rbd_img_request, kref);

	dout("%s: img %p\n", __func__, img_request);

	WARN_ON(!list_empty(&img_request->lock_item));
	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);

	if (img_request_layered_test(img_request)) {
		img_request_layered_clear(img_request);
		rbd_dev_parent_put(img_request->rbd_dev);
	}

	if (rbd_img_is_write(img_request))
		ceph_put_snap_context(img_request->snapc);

	kmem_cache_free(rbd_img_request_cache, img_request);
}
#define BITS_PER_OBJ	2
#define OBJS_PER_BYTE	(BITS_PER_BYTE / BITS_PER_OBJ)
#define OBJ_MASK	((1 << BITS_PER_OBJ) - 1)

static void __rbd_object_map_index(struct rbd_device *rbd_dev, u64 objno,
				   u64 *index, u8 *shift)
{
	u32 off;

	rbd_assert(objno < rbd_dev->object_map_size);
	*index = div_u64_rem(objno, OBJS_PER_BYTE, &off);
	*shift = (OBJS_PER_BYTE - off - 1) * BITS_PER_OBJ;
}
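/*
 * Worked example (illustrative only): with 2 bits per object, each
 * byte packs the state of 4 objects, most significant bits first.
 * For objno 5: index = 5 / 4 = 1, off = 1,
 * shift = (4 - 1 - 1) * 2 = 4, so object 5's state lives in bits 5:4
 * of object_map[1].
 */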
static u8 __rbd_object_map_get(struct rbd_device *rbd_dev, u64 objno)
{
	u64 index;
	u8 shift;

	lockdep_assert_held(&rbd_dev->object_map_lock);
	__rbd_object_map_index(rbd_dev, objno, &index, &shift);
	return (rbd_dev->object_map[index] >> shift) & OBJ_MASK;
}

static void __rbd_object_map_set(struct rbd_device *rbd_dev, u64 objno, u8 val)
{
	u64 index;
	u8 shift;
	u8 *p;

	lockdep_assert_held(&rbd_dev->object_map_lock);
	rbd_assert(!(val & ~OBJ_MASK));

	__rbd_object_map_index(rbd_dev, objno, &index, &shift);
	p = &rbd_dev->object_map[index];
	*p = (*p & ~(OBJ_MASK << shift)) | (val << shift);
}

static u8 rbd_object_map_get(struct rbd_device *rbd_dev, u64 objno)
{
	u8 state;

	spin_lock(&rbd_dev->object_map_lock);
	state = __rbd_object_map_get(rbd_dev, objno);
	spin_unlock(&rbd_dev->object_map_lock);
	return state;
}
static bool use_object_map(struct rbd_device *rbd_dev)
{
	/*
	 * An image mapped read-only can't use the object map -- it isn't
	 * loaded because the header lock isn't acquired.  Someone else can
	 * write to the image and update the object map behind our back.
	 *
	 * A snapshot can't be written to, so using the object map is always
	 * safe.
	 */
	if (!rbd_is_snap(rbd_dev) && rbd_is_ro(rbd_dev))
		return false;

	return ((rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP) &&
		!(rbd_dev->object_map_flags & RBD_FLAG_OBJECT_MAP_INVALID));
}

static bool rbd_object_map_may_exist(struct rbd_device *rbd_dev, u64 objno)
{
	u8 state;

	/* fall back to default logic if object map is disabled or invalid */
	if (!use_object_map(rbd_dev))
		return true;

	state = rbd_object_map_get(rbd_dev, objno);
	return state != OBJECT_NONEXISTENT;
}
static void rbd_object_map_name(struct rbd_device *rbd_dev, u64 snap_id,
				struct ceph_object_id *oid)
{
	if (snap_id == CEPH_NOSNAP)
		ceph_oid_printf(oid, "%s%s", RBD_OBJECT_MAP_PREFIX,
				rbd_dev->spec->image_id);
	else
		ceph_oid_printf(oid, "%s%s.%016llx", RBD_OBJECT_MAP_PREFIX,
				rbd_dev->spec->image_id, snap_id);
}
static int rbd_object_map_lock(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	CEPH_DEFINE_OID_ONSTACK(oid);
	u8 lock_type;
	char *lock_tag;
	struct ceph_locker *lockers;
	u32 num_lockers;
	bool broke_lock = false;
	int ret;

	rbd_object_map_name(rbd_dev, CEPH_NOSNAP, &oid);

again:
	ret = ceph_cls_lock(osdc, &oid, &rbd_dev->header_oloc, RBD_LOCK_NAME,
			    CEPH_CLS_LOCK_EXCLUSIVE, "", "", "", 0);
	if (ret != -EBUSY || broke_lock) {
		if (ret == -EEXIST)
			ret = 0; /* already locked by myself */
		if (ret)
			rbd_warn(rbd_dev, "failed to lock object map: %d", ret);
		return ret;
	}

	ret = ceph_cls_lock_info(osdc, &oid, &rbd_dev->header_oloc,
				 RBD_LOCK_NAME, &lock_type, &lock_tag,
				 &lockers, &num_lockers);
	if (ret) {
		if (ret == -ENOENT)
			goto again;

		rbd_warn(rbd_dev, "failed to get object map lockers: %d", ret);
		return ret;
	}

	kfree(lock_tag);	/* not used */
	if (num_lockers == 0)
		goto again;

	rbd_warn(rbd_dev, "breaking object map lock owned by %s%llu",
		 ENTITY_NAME(lockers[0].id.name));

	ret = ceph_cls_break_lock(osdc, &oid, &rbd_dev->header_oloc,
				  RBD_LOCK_NAME, lockers[0].id.cookie,
				  &lockers[0].id.name);
	ceph_free_lockers(lockers, num_lockers);
	if (ret) {
		if (ret == -ENOENT)
			goto again;

		rbd_warn(rbd_dev, "failed to break object map lock: %d", ret);
		return ret;
	}

	broke_lock = true;
	goto again;
}
static void rbd_object_map_unlock(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	CEPH_DEFINE_OID_ONSTACK(oid);
	int ret;

	rbd_object_map_name(rbd_dev, CEPH_NOSNAP, &oid);

	ret = ceph_cls_unlock(osdc, &oid, &rbd_dev->header_oloc, RBD_LOCK_NAME,
			      "");
	if (ret && ret != -ENOENT)
		rbd_warn(rbd_dev, "failed to unlock object map: %d", ret);
}
static int decode_object_map_header(void **p, void *end, u64 *object_map_size)
{
	u8 struct_v;
	u32 struct_len;
	u32 header_len;
	void *header_end;
	int ret;

	ceph_decode_32_safe(p, end, header_len, e_inval);
	header_end = *p + header_len;

	ret = ceph_start_decoding(p, end, 1, "BitVector header", &struct_v,
				  &struct_len);
	if (ret)
		return ret;

	ceph_decode_64_safe(p, end, *object_map_size, e_inval);

	*p = header_end;
	return 0;

e_inval:
	return -EINVAL;
}
static int __rbd_object_map_load(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	CEPH_DEFINE_OID_ONSTACK(oid);
	struct page **pages;
	void *p, *end;
	size_t reply_len;
	u64 num_objects;
	u64 object_map_bytes;
	u64 object_map_size;
	int num_pages;
	int ret;

	rbd_assert(!rbd_dev->object_map && !rbd_dev->object_map_size);

	num_objects = ceph_get_num_objects(&rbd_dev->layout,
					   rbd_dev->mapping.size);
	object_map_bytes = DIV_ROUND_UP_ULL(num_objects * BITS_PER_OBJ,
					    BITS_PER_BYTE);
	num_pages = calc_pages_for(0, object_map_bytes) + 1;
	pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	reply_len = num_pages * PAGE_SIZE;
	rbd_object_map_name(rbd_dev, rbd_dev->spec->snap_id, &oid);
	ret = ceph_osdc_call(osdc, &oid, &rbd_dev->header_oloc,
			     "rbd", "object_map_load", CEPH_OSD_FLAG_READ,
			     NULL, 0, pages, &reply_len);
	if (ret)
		goto out;

	p = page_address(pages[0]);
	end = p + min(reply_len, (size_t)PAGE_SIZE);
	ret = decode_object_map_header(&p, end, &object_map_size);
	if (ret)
		goto out;

	if (object_map_size != num_objects) {
		rbd_warn(rbd_dev, "object map size mismatch: %llu vs %llu",
			 object_map_size, num_objects);
		ret = -EINVAL;
		goto out;
	}

	if (offset_in_page(p) + object_map_bytes > reply_len) {
		ret = -EINVAL;
		goto out;
	}

	rbd_dev->object_map = kvmalloc(object_map_bytes, GFP_KERNEL);
	if (!rbd_dev->object_map) {
		ret = -ENOMEM;
		goto out;
	}

	rbd_dev->object_map_size = object_map_size;
	ceph_copy_from_page_vector(pages, rbd_dev->object_map,
				   offset_in_page(p), object_map_bytes);

out:
	ceph_release_page_vector(pages, num_pages);
	return ret;
}
static void rbd_object_map_free(struct rbd_device *rbd_dev)
{
	kvfree(rbd_dev->object_map);
	rbd_dev->object_map = NULL;
	rbd_dev->object_map_size = 0;
}

static int rbd_object_map_load(struct rbd_device *rbd_dev)
{
	int ret;

	ret = __rbd_object_map_load(rbd_dev);
	if (ret)
		return ret;

	ret = rbd_dev_v2_get_flags(rbd_dev);
	if (ret) {
		rbd_object_map_free(rbd_dev);
		return ret;
	}

	if (rbd_dev->object_map_flags & RBD_FLAG_OBJECT_MAP_INVALID)
		rbd_warn(rbd_dev, "object map is invalid");

	return 0;
}

static int rbd_object_map_open(struct rbd_device *rbd_dev)
{
	int ret;

	ret = rbd_object_map_lock(rbd_dev);
	if (ret)
		return ret;

	ret = rbd_object_map_load(rbd_dev);
	if (ret) {
		rbd_object_map_unlock(rbd_dev);
		return ret;
	}

	return 0;
}

static void rbd_object_map_close(struct rbd_device *rbd_dev)
{
	rbd_object_map_free(rbd_dev);
	rbd_object_map_unlock(rbd_dev);
}
/*
 * This function needs snap_id (or more precisely just something to
 * distinguish between HEAD and snapshot object maps), new_state and
 * current_state that were passed to rbd_object_map_update().
 *
 * To avoid allocating and stashing a context we piggyback on the OSD
 * request.  A HEAD update has two ops (assert_locked).  For new_state
 * and current_state we decode our own object_map_update op, encoded in
 * rbd_cls_object_map_update().
 */
static int rbd_object_map_update_finish(struct rbd_obj_request *obj_req,
					struct ceph_osd_request *osd_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	struct ceph_osd_data *osd_data;
	u64 objno;
	u8 state, new_state, uninitialized_var(current_state);
	bool has_current_state;
	void *p;

	if (osd_req->r_result)
		return osd_req->r_result;

	/*
	 * Nothing to do for a snapshot object map.
	 */
	if (osd_req->r_num_ops == 1)
		return 0;

	/*
	 * Update in-memory HEAD object map.
	 */
	rbd_assert(osd_req->r_num_ops == 2);
	osd_data = osd_req_op_data(osd_req, 1, cls, request_data);
	rbd_assert(osd_data->type == CEPH_OSD_DATA_TYPE_PAGES);

	p = page_address(osd_data->pages[0]);
	objno = ceph_decode_64(&p);
	rbd_assert(objno == obj_req->ex.oe_objno);
	rbd_assert(ceph_decode_64(&p) == objno + 1);
	new_state = ceph_decode_8(&p);
	has_current_state = ceph_decode_8(&p);
	if (has_current_state)
		current_state = ceph_decode_8(&p);

	spin_lock(&rbd_dev->object_map_lock);
	state = __rbd_object_map_get(rbd_dev, objno);
	if (!has_current_state || current_state == state ||
	    (current_state == OBJECT_EXISTS && state == OBJECT_EXISTS_CLEAN))
		__rbd_object_map_set(rbd_dev, objno, new_state);
	spin_unlock(&rbd_dev->object_map_lock);

	return 0;
}
static void rbd_object_map_callback(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;
	int result;

	dout("%s osd_req %p result %d for obj_req %p\n", __func__, osd_req,
	     osd_req->r_result, obj_req);

	result = rbd_object_map_update_finish(obj_req, osd_req);
	rbd_obj_handle_request(obj_req, result);
}
static bool update_needed(struct rbd_device *rbd_dev, u64 objno, u8 new_state)
{
	u8 state = rbd_object_map_get(rbd_dev, objno);

	if (state == new_state ||
	    (new_state == OBJECT_PENDING && state == OBJECT_NONEXISTENT) ||
	    (new_state == OBJECT_NONEXISTENT && state != OBJECT_PENDING))
		return false;

	return true;
}
static int rbd_cls_object_map_update(struct ceph_osd_request *req,
				     int which, u64 objno, u8 new_state,
				     const u8 *current_state)
{
	struct page **pages;
	void *p, *start;
	int ret;

	ret = osd_req_op_cls_init(req, which, "rbd", "object_map_update");
	if (ret)
		return ret;

	pages = ceph_alloc_page_vector(1, GFP_NOIO);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	p = start = page_address(pages[0]);
	ceph_encode_64(&p, objno);
	ceph_encode_64(&p, objno + 1);
	ceph_encode_8(&p, new_state);
	if (current_state) {
		ceph_encode_8(&p, 1);
		ceph_encode_8(&p, *current_state);
	} else {
		ceph_encode_8(&p, 0);
	}

	osd_req_op_cls_request_data_pages(req, which, pages, p - start, 0,
					  false, true);
	return 0;
}
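/*
 * Wire-format sketch of what the function above encodes (derived from
 * the encode calls, not a separate spec): the object_map_update call
 * takes a one-object range [objno, objno + 1), the new state, and an
 * optional current-state guard:
 *
 *	le64 start_objno, le64 end_objno, u8 new_state,
 *	u8 has_current_state, [u8 current_state]
 */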
/*
 * Return:
 *   0 - object map update sent
 *   1 - object map update isn't needed
 *  <0 - error
 */
static int rbd_object_map_update(struct rbd_obj_request *obj_req, u64 snap_id,
				 u8 new_state, const u8 *current_state)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct ceph_osd_request *req;
	int num_ops = 1;
	int which = 0;
	int ret;

	if (snap_id == CEPH_NOSNAP) {
		if (!update_needed(rbd_dev, obj_req->ex.oe_objno, new_state))
			return 1;

		num_ops++; /* assert_locked */
	}

	req = ceph_osdc_alloc_request(osdc, NULL, num_ops, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	list_add_tail(&req->r_private_item, &obj_req->osd_reqs);
	req->r_callback = rbd_object_map_callback;
	req->r_priv = obj_req;

	rbd_object_map_name(rbd_dev, snap_id, &req->r_base_oid);
	ceph_oloc_copy(&req->r_base_oloc, &rbd_dev->header_oloc);
	req->r_flags = CEPH_OSD_FLAG_WRITE;
	ktime_get_real_ts64(&req->r_mtime);

	if (snap_id == CEPH_NOSNAP) {
		/*
		 * Protect against possible race conditions during lock
		 * ownership transitions.
		 */
		ret = ceph_cls_assert_locked(req, which++, RBD_LOCK_NAME,
					     CEPH_CLS_LOCK_EXCLUSIVE, "", "");
		if (ret)
			return ret;
	}

	ret = rbd_cls_object_map_update(req, which, obj_req->ex.oe_objno,
					new_state, current_state);
	if (ret)
		return ret;

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		return ret;

	ceph_osdc_start_request(osdc, req, false);
	return 0;
}
static void prune_extents(struct ceph_file_extent *img_extents,
			  u32 *num_img_extents, u64 overlap)
{
	u32 cnt = *num_img_extents;

	/* drop extents completely beyond the overlap */
	while (cnt && img_extents[cnt - 1].fe_off >= overlap)
		cnt--;

	if (cnt) {
		struct ceph_file_extent *ex = &img_extents[cnt - 1];

		/* trim final overlapping extent */
		if (ex->fe_off + ex->fe_len > overlap)
			ex->fe_len = overlap - ex->fe_off;
	}

	*num_img_extents = cnt;
}
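/*
 * Worked example (illustrative only): with extents
 * { 0~4096, 8192~4096 } and a parent overlap of 10240, the second
 * extent is trimmed to 8192~2048; with an overlap of 8192 it is
 * dropped entirely, leaving just { 0~4096 }.
 */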
/*
 * Determine the byte range(s) covered by either just the object extent
 * or the entire object in the parent image.
 */
static int rbd_obj_calc_img_extents(struct rbd_obj_request *obj_req,
				    bool entire)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	int ret;

	if (!rbd_dev->parent_overlap)
		return 0;

	ret = ceph_extent_to_file(&rbd_dev->layout, obj_req->ex.oe_objno,
				  entire ? 0 : obj_req->ex.oe_off,
				  entire ? rbd_dev->layout.object_size :
							obj_req->ex.oe_len,
				  &obj_req->img_extents,
				  &obj_req->num_img_extents);
	if (ret)
		return ret;

	prune_extents(obj_req->img_extents, &obj_req->num_img_extents,
		      rbd_dev->parent_overlap);
	return 0;
}
static void rbd_osd_setup_data(struct ceph_osd_request *osd_req, int which)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;

	switch (obj_req->img_request->data_type) {
	case OBJ_REQUEST_BIO:
		osd_req_op_extent_osd_data_bio(osd_req, which,
					       &obj_req->bio_pos,
					       obj_req->ex.oe_len);
		break;
	case OBJ_REQUEST_BVECS:
	case OBJ_REQUEST_OWN_BVECS:
		rbd_assert(obj_req->bvec_pos.iter.bi_size ==
							obj_req->ex.oe_len);
		rbd_assert(obj_req->bvec_idx == obj_req->bvec_count);
		osd_req_op_extent_osd_data_bvec_pos(osd_req, which,
						    &obj_req->bvec_pos);
		break;
	default:
		BUG();
	}
}
static int rbd_osd_setup_stat(struct ceph_osd_request *osd_req, int which)
{
	struct page **pages;

	/*
	 * The response data for a STAT call consists of:
	 *     le64 length;
	 *     struct {
	 *         le32 tv_sec;
	 *         le32 tv_nsec;
	 *     } mtime;
	 */
	pages = ceph_alloc_page_vector(1, GFP_NOIO);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	osd_req_op_init(osd_req, which, CEPH_OSD_OP_STAT, 0);
	osd_req_op_raw_data_in_pages(osd_req, which, pages,
				     8 + sizeof(struct ceph_timespec),
				     0, false, true);
	return 0;
}
static int rbd_osd_setup_copyup(struct ceph_osd_request *osd_req, int which,
				u32 bytes)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;
	int ret;

	ret = osd_req_op_cls_init(osd_req, which, "rbd", "copyup");
	if (ret)
		return ret;

	osd_req_op_cls_request_data_bvecs(osd_req, which, obj_req->copyup_bvecs,
					  obj_req->copyup_bvec_count, bytes);
	return 0;
}
static int rbd_obj_init_read(struct rbd_obj_request *obj_req)
{
	obj_req->read_state = RBD_OBJ_READ_START;
	return 0;
}

static void __rbd_osd_setup_write_ops(struct ceph_osd_request *osd_req,
				      int which)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	u16 opcode;

	if (!use_object_map(rbd_dev) ||
	    !(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST)) {
		osd_req_op_alloc_hint_init(osd_req, which++,
					   rbd_dev->layout.object_size,
					   rbd_dev->layout.object_size);
	}

	if (rbd_obj_is_entire(obj_req))
		opcode = CEPH_OSD_OP_WRITEFULL;
	else
		opcode = CEPH_OSD_OP_WRITE;

	osd_req_op_extent_init(osd_req, which, opcode,
			       obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0);
	rbd_osd_setup_data(osd_req, which);
}
static int rbd_obj_init_write(struct rbd_obj_request *obj_req)
{
	int ret;

	/* reverse map the entire object onto the parent */
	ret = rbd_obj_calc_img_extents(obj_req, true);
	if (ret)
		return ret;

	if (rbd_obj_copyup_enabled(obj_req))
		obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED;

	obj_req->write_state = RBD_OBJ_WRITE_START;
	return 0;
}
static u16 truncate_or_zero_opcode(struct rbd_obj_request *obj_req)
{
	return rbd_obj_is_tail(obj_req) ? CEPH_OSD_OP_TRUNCATE :
					  CEPH_OSD_OP_ZERO;
}

static void __rbd_osd_setup_discard_ops(struct ceph_osd_request *osd_req,
					int which)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;

	if (rbd_obj_is_entire(obj_req) && !obj_req->num_img_extents) {
		rbd_assert(obj_req->flags & RBD_OBJ_FLAG_DELETION);
		osd_req_op_init(osd_req, which, CEPH_OSD_OP_DELETE, 0);
	} else {
		osd_req_op_extent_init(osd_req, which,
				       truncate_or_zero_opcode(obj_req),
				       obj_req->ex.oe_off, obj_req->ex.oe_len,
				       0, 0);
	}
}
static int rbd_obj_init_discard(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	u64 off, next_off;
	int ret;

	/*
	 * Align the range to alloc_size boundary and punt on discards
	 * that are too small to free up any space.
	 *
	 * alloc_size == object_size && is_tail() is a special case for
	 * filestore with filestore_punch_hole = false, needed to allow
	 * truncate (in addition to delete).
	 */
	if (rbd_dev->opts->alloc_size != rbd_dev->layout.object_size ||
	    !rbd_obj_is_tail(obj_req)) {
		off = round_up(obj_req->ex.oe_off, rbd_dev->opts->alloc_size);
		next_off = round_down(obj_req->ex.oe_off + obj_req->ex.oe_len,
				      rbd_dev->opts->alloc_size);
		if (off >= next_off)
			return 1;

		dout("%s %p %llu~%llu -> %llu~%llu\n", __func__,
		     obj_req, obj_req->ex.oe_off, obj_req->ex.oe_len,
		     off, next_off - off);
		obj_req->ex.oe_off = off;
		obj_req->ex.oe_len = next_off - off;
	}

	/* reverse map the entire object onto the parent */
	ret = rbd_obj_calc_img_extents(obj_req, true);
	if (ret)
		return ret;

	obj_req->flags |= RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT;
	if (rbd_obj_is_entire(obj_req) && !obj_req->num_img_extents)
		obj_req->flags |= RBD_OBJ_FLAG_DELETION;

	obj_req->write_state = RBD_OBJ_WRITE_START;
	return 0;
}
2381 static void __rbd_osd_setup_zeroout_ops(struct ceph_osd_request *osd_req,
2384 struct rbd_obj_request *obj_req = osd_req->r_priv;
2387 if (rbd_obj_is_entire(obj_req)) {
2388 if (obj_req->num_img_extents) {
2389 if (!(obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED))
2390 osd_req_op_init(osd_req, which++,
2391 CEPH_OSD_OP_CREATE, 0);
2392 opcode = CEPH_OSD_OP_TRUNCATE;
2394 rbd_assert(obj_req->flags & RBD_OBJ_FLAG_DELETION);
2395 osd_req_op_init(osd_req, which++,
2396 CEPH_OSD_OP_DELETE, 0);
2400 opcode = truncate_or_zero_opcode(obj_req);
2404 osd_req_op_extent_init(osd_req, which, opcode,
2405 obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0);
2409 static int rbd_obj_init_zeroout(struct rbd_obj_request *obj_req)
2413 /* reverse map the entire object onto the parent */
2414 ret = rbd_obj_calc_img_extents(obj_req, true);
2418 if (rbd_obj_copyup_enabled(obj_req))
2419 obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED;
2420 if (!obj_req->num_img_extents) {
2421 obj_req->flags |= RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT;
2422 if (rbd_obj_is_entire(obj_req))
2423 obj_req->flags |= RBD_OBJ_FLAG_DELETION;
2426 obj_req->write_state = RBD_OBJ_WRITE_START;
2430 static int count_write_ops(struct rbd_obj_request *obj_req)
2432 struct rbd_img_request *img_req = obj_req->img_request;
2434 switch (img_req->op_type) {
2436 if (!use_object_map(img_req->rbd_dev) ||
2437 !(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST))
2438 return 2; /* setallochint + write/writefull */
2440 return 1; /* write/writefull */
2441 case OBJ_OP_DISCARD:
2442 return 1; /* delete/truncate/zero */
2443 case OBJ_OP_ZEROOUT:
2444 if (rbd_obj_is_entire(obj_req) && obj_req->num_img_extents &&
2445 !(obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED))
2446 return 2; /* create + truncate */
2448 return 1; /* delete/truncate/zero */
2454 static void rbd_osd_setup_write_ops(struct ceph_osd_request *osd_req,
2457 struct rbd_obj_request *obj_req = osd_req->r_priv;
2459 switch (obj_req->img_request->op_type) {
2461 __rbd_osd_setup_write_ops(osd_req, which);
2463 case OBJ_OP_DISCARD:
2464 __rbd_osd_setup_discard_ops(osd_req, which);
2466 case OBJ_OP_ZEROOUT:
2467 __rbd_osd_setup_zeroout_ops(osd_req, which);
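/*
 * count_write_ops() and the __rbd_osd_setup_*_ops() helpers above
 * must stay in sync: the count is used to size the OSD request (see
 * rbd_obj_write_object()) before the ops themselves are filled in.
 */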
2475 * Prune the list of object requests (adjust offset and/or length, drop
2476 * redundant requests). Prepare object request state machines and image
2477 * request state machine for execution.
2479 static int __rbd_img_fill_request(struct rbd_img_request *img_req)
2481 struct rbd_obj_request *obj_req, *next_obj_req;
2484 for_each_obj_request_safe(img_req, obj_req, next_obj_req) {
2485 switch (img_req->op_type) {
2487 ret = rbd_obj_init_read(obj_req);
2490 ret = rbd_obj_init_write(obj_req);
2492 case OBJ_OP_DISCARD:
2493 ret = rbd_obj_init_discard(obj_req);
2495 case OBJ_OP_ZEROOUT:
2496 ret = rbd_obj_init_zeroout(obj_req);
2504 rbd_img_obj_request_del(img_req, obj_req);
2509 img_req->state = RBD_IMG_START;
2513 union rbd_img_fill_iter {
2514 struct ceph_bio_iter bio_iter;
2515 struct ceph_bvec_iter bvec_iter;
2518 struct rbd_img_fill_ctx {
2519 enum obj_request_type pos_type;
2520 union rbd_img_fill_iter *pos;
2521 union rbd_img_fill_iter iter;
2522 ceph_object_extent_fn_t set_pos_fn;
2523 ceph_object_extent_fn_t count_fn;
2524 ceph_object_extent_fn_t copy_fn;
2527 static struct ceph_object_extent *alloc_object_extent(void *arg)
2529 struct rbd_img_request *img_req = arg;
2530 struct rbd_obj_request *obj_req;
2532 obj_req = rbd_obj_request_create();
2536 rbd_img_obj_request_add(img_req, obj_req);
2537 return &obj_req->ex;
2541 * While su != os && sc == 1 is technically not fancy (it's the same
2542 * layout as su == os && sc == 1), we can't use the nocopy path for it
2543 * because ->set_pos_fn() should be called only once per object.
2544 * ceph_file_to_extents() invokes action_fn once per stripe unit, so
2545 * treat su != os && sc == 1 as fancy.
2547 static bool rbd_layout_is_fancy(struct ceph_file_layout *l)
2549 return l->stripe_unit != l->object_size;
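/*
 * Illustration: with the default layout (su == os, sc == 1) a stripe
 * unit is a whole object, so each object request can simply record
 * its position in the original bio/bvecs (the nocopy path).  With
 * e.g. su = 1M, os = 4M, sc = 1, ceph_file_to_extents() fires the
 * action function once per 1M stripe unit, so an object's data
 * arrives in several chunks and must be gathered into a private
 * bio_vec array (the copy path below).
 */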
2552 static int rbd_img_fill_request_nocopy(struct rbd_img_request *img_req,
2553 struct ceph_file_extent *img_extents,
2554 u32 num_img_extents,
2555 struct rbd_img_fill_ctx *fctx)
2560 img_req->data_type = fctx->pos_type;
2563 * Create object requests and set each object request's starting
2564 * position in the provided bio (list) or bio_vec array.
2566 fctx->iter = *fctx->pos;
2567 for (i = 0; i < num_img_extents; i++) {
2568 ret = ceph_file_to_extents(&img_req->rbd_dev->layout,
2569 img_extents[i].fe_off,
2570 img_extents[i].fe_len,
2571 &img_req->object_extents,
2572 alloc_object_extent, img_req,
2573 fctx->set_pos_fn, &fctx->iter);
2578 return __rbd_img_fill_request(img_req);
2582 * Map a list of image extents to a list of object extents, create the
2583 * corresponding object requests (normally each to a different object,
2584 * but not always) and add them to @img_req. For each object request,
2585 * set up its data descriptor to point to the corresponding chunk(s) of
2586 * @fctx->pos data buffer.
2588 * Because ceph_file_to_extents() will merge adjacent object extents
2589 * together, each object request's data descriptor may point to multiple
2590 * different chunks of @fctx->pos data buffer.
2592 * @fctx->pos data buffer is assumed to be large enough.
2594 static int rbd_img_fill_request(struct rbd_img_request *img_req,
2595 struct ceph_file_extent *img_extents,
2596 u32 num_img_extents,
2597 struct rbd_img_fill_ctx *fctx)
2599 struct rbd_device *rbd_dev = img_req->rbd_dev;
2600 struct rbd_obj_request *obj_req;
2604 if (fctx->pos_type == OBJ_REQUEST_NODATA ||
2605 !rbd_layout_is_fancy(&rbd_dev->layout))
2606 return rbd_img_fill_request_nocopy(img_req, img_extents,
2607 num_img_extents, fctx);
2609 img_req->data_type = OBJ_REQUEST_OWN_BVECS;
2612 * Create object requests and determine ->bvec_count for each object
2613 * request. Note that ->bvec_count sum over all object requests may
2614 * be greater than the number of bio_vecs in the provided bio (list)
2615 * or bio_vec array because when mapped, those bio_vecs can straddle
2616 * stripe unit boundaries.
2618 fctx->iter = *fctx->pos;
2619 for (i = 0; i < num_img_extents; i++) {
2620 ret = ceph_file_to_extents(&rbd_dev->layout,
2621 img_extents[i].fe_off,
2622 img_extents[i].fe_len,
2623 &img_req->object_extents,
2624 alloc_object_extent, img_req,
2625 fctx->count_fn, &fctx->iter);
2630 for_each_obj_request(img_req, obj_req) {
2631 obj_req->bvec_pos.bvecs = kmalloc_array(obj_req->bvec_count,
2632 sizeof(*obj_req->bvec_pos.bvecs), GFP_NOIO);
2634 if (!obj_req->bvec_pos.bvecs)
2639 * Fill in each object request's private bio_vec array, splitting and
2640 * rearranging the provided bio_vecs in stripe unit chunks as needed.
2642 fctx->iter = *fctx->pos;
2643 for (i = 0; i < num_img_extents; i++) {
2644 ret = ceph_iterate_extents(&rbd_dev->layout,
2645 img_extents[i].fe_off,
2646 img_extents[i].fe_len,
2647 &img_req->object_extents,
2648 fctx->copy_fn, &fctx->iter);
2653 return __rbd_img_fill_request(img_req);
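/*
 * To recap, the fancy-striping path above makes three passes over the
 * image extents: ->count_fn sizes each object request's bvec array,
 * kmalloc_array() allocates it, and ->copy_fn fills it in stripe unit
 * chunks before the request state machines are prepared.
 */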
2656 static int rbd_img_fill_nodata(struct rbd_img_request *img_req,
2659 struct ceph_file_extent ex = { off, len };
2660 union rbd_img_fill_iter dummy = {};
2661 struct rbd_img_fill_ctx fctx = {
2662 .pos_type = OBJ_REQUEST_NODATA, .pos = &dummy,
2666 return rbd_img_fill_request(img_req, &ex, 1, &fctx);
2669 static void set_bio_pos(struct ceph_object_extent *ex, u32 bytes, void *arg)
2671 struct rbd_obj_request *obj_req =
2672 container_of(ex, struct rbd_obj_request, ex);
2673 struct ceph_bio_iter *it = arg;
2675 dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
2676 obj_req->bio_pos = *it;
2677 ceph_bio_iter_advance(it, bytes);
2680 static void count_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2682 struct rbd_obj_request *obj_req =
2683 container_of(ex, struct rbd_obj_request, ex);
2684 struct ceph_bio_iter *it = arg;
2686 dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
2687 ceph_bio_iter_advance_step(it, bytes, ({
2688 obj_req->bvec_count++;
2693 static void copy_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2695 struct rbd_obj_request *obj_req =
2696 container_of(ex, struct rbd_obj_request, ex);
2697 struct ceph_bio_iter *it = arg;
2699 dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
2700 ceph_bio_iter_advance_step(it, bytes, ({
2701 obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
2702 obj_req->bvec_pos.iter.bi_size += bv.bv_len;
2706 static int __rbd_img_fill_from_bio(struct rbd_img_request *img_req,
2707 struct ceph_file_extent *img_extents,
2708 u32 num_img_extents,
2709 struct ceph_bio_iter *bio_pos)
2711 struct rbd_img_fill_ctx fctx = {
2712 .pos_type = OBJ_REQUEST_BIO,
2713 .pos = (union rbd_img_fill_iter *)bio_pos,
2714 .set_pos_fn = set_bio_pos,
2715 .count_fn = count_bio_bvecs,
2716 .copy_fn = copy_bio_bvecs,
2719 return rbd_img_fill_request(img_req, img_extents, num_img_extents, &fctx);
2723 static int rbd_img_fill_from_bio(struct rbd_img_request *img_req,
2724 u64 off, u64 len, struct bio *bio)
2726 struct ceph_file_extent ex = { off, len };
2727 struct ceph_bio_iter it = { .bio = bio, .iter = bio->bi_iter };
2729 return __rbd_img_fill_from_bio(img_req, &ex, 1, &it);
2732 static void set_bvec_pos(struct ceph_object_extent *ex, u32 bytes, void *arg)
2734 struct rbd_obj_request *obj_req =
2735 container_of(ex, struct rbd_obj_request, ex);
2736 struct ceph_bvec_iter *it = arg;
2738 obj_req->bvec_pos = *it;
2739 ceph_bvec_iter_shorten(&obj_req->bvec_pos, bytes);
2740 ceph_bvec_iter_advance(it, bytes);
2743 static void count_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2745 struct rbd_obj_request *obj_req =
2746 container_of(ex, struct rbd_obj_request, ex);
2747 struct ceph_bvec_iter *it = arg;
2749 ceph_bvec_iter_advance_step(it, bytes, ({
2750 obj_req->bvec_count++;
2754 static void copy_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2756 struct rbd_obj_request *obj_req =
2757 container_of(ex, struct rbd_obj_request, ex);
2758 struct ceph_bvec_iter *it = arg;
2760 ceph_bvec_iter_advance_step(it, bytes, ({
2761 obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
2762 obj_req->bvec_pos.iter.bi_size += bv.bv_len;
2766 static int __rbd_img_fill_from_bvecs(struct rbd_img_request *img_req,
2767 struct ceph_file_extent *img_extents,
2768 u32 num_img_extents,
2769 struct ceph_bvec_iter *bvec_pos)
2771 struct rbd_img_fill_ctx fctx = {
2772 .pos_type = OBJ_REQUEST_BVECS,
2773 .pos = (union rbd_img_fill_iter *)bvec_pos,
2774 .set_pos_fn = set_bvec_pos,
2775 .count_fn = count_bvecs,
2776 .copy_fn = copy_bvecs,
2779 return rbd_img_fill_request(img_req, img_extents, num_img_extents, &fctx);
2783 static int rbd_img_fill_from_bvecs(struct rbd_img_request *img_req,
2784 struct ceph_file_extent *img_extents,
2785 u32 num_img_extents,
2786 struct bio_vec *bvecs)
2788 struct ceph_bvec_iter it = { .bvecs = bvecs, .iter = { .bi_size = ceph_file_extents_bytes(img_extents, num_img_extents) }, };
2794 return __rbd_img_fill_from_bvecs(img_req, img_extents, num_img_extents, &it);
2798 static void rbd_img_handle_request_work(struct work_struct *work)
2800 struct rbd_img_request *img_req =
2801 container_of(work, struct rbd_img_request, work);
2803 rbd_img_handle_request(img_req, img_req->work_result);
2806 static void rbd_img_schedule(struct rbd_img_request *img_req, int result)
2808 INIT_WORK(&img_req->work, rbd_img_handle_request_work);
2809 img_req->work_result = result;
2810 queue_work(rbd_wq, &img_req->work);
2813 static bool rbd_obj_may_exist(struct rbd_obj_request *obj_req)
2815 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2817 if (rbd_object_map_may_exist(rbd_dev, obj_req->ex.oe_objno)) {
2818 obj_req->flags |= RBD_OBJ_FLAG_MAY_EXIST;
2822 dout("%s %p objno %llu assuming dne\n", __func__, obj_req,
2823 obj_req->ex.oe_objno);
2827 static int rbd_obj_read_object(struct rbd_obj_request *obj_req)
2829 struct ceph_osd_request *osd_req;
2832 osd_req = __rbd_obj_add_osd_request(obj_req, NULL, 1);
2833 if (IS_ERR(osd_req))
2834 return PTR_ERR(osd_req);
2836 osd_req_op_extent_init(osd_req, 0, CEPH_OSD_OP_READ,
2837 obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0);
2838 rbd_osd_setup_data(osd_req, 0);
2839 rbd_osd_format_read(osd_req);
2841 ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
2845 rbd_osd_submit(osd_req);
2849 static int rbd_obj_read_from_parent(struct rbd_obj_request *obj_req)
2851 struct rbd_img_request *img_req = obj_req->img_request;
2852 struct rbd_img_request *child_img_req;
2855 child_img_req = rbd_img_request_create(img_req->rbd_dev->parent, OBJ_OP_READ, NULL);
2860 __set_bit(IMG_REQ_CHILD, &child_img_req->flags);
2861 child_img_req->obj_request = obj_req;
2863 dout("%s child_img_req %p for obj_req %p\n", __func__, child_img_req,
2866 if (!rbd_img_is_write(img_req)) {
2867 switch (img_req->data_type) {
2868 case OBJ_REQUEST_BIO:
2869 ret = __rbd_img_fill_from_bio(child_img_req,
2870 obj_req->img_extents,
2871 obj_req->num_img_extents, &obj_req->bio_pos);
2874 case OBJ_REQUEST_BVECS:
2875 case OBJ_REQUEST_OWN_BVECS:
2876 ret = __rbd_img_fill_from_bvecs(child_img_req,
2877 obj_req->img_extents,
2878 obj_req->num_img_extents,
2879 &obj_req->bvec_pos);
2885 ret = rbd_img_fill_from_bvecs(child_img_req,
2886 obj_req->img_extents,
2887 obj_req->num_img_extents,
2888 obj_req->copyup_bvecs);
2891 rbd_img_request_put(child_img_req);
2895 /* avoid parent chain recursion */
2896 rbd_img_schedule(child_img_req, 0);
2900 static bool rbd_obj_advance_read(struct rbd_obj_request *obj_req, int *result)
2902 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2906 switch (obj_req->read_state) {
2907 case RBD_OBJ_READ_START:
2908 rbd_assert(!*result);
2910 if (!rbd_obj_may_exist(obj_req)) {
2912 obj_req->read_state = RBD_OBJ_READ_OBJECT;
2916 ret = rbd_obj_read_object(obj_req);
2921 obj_req->read_state = RBD_OBJ_READ_OBJECT;
2923 case RBD_OBJ_READ_OBJECT:
2924 if (*result == -ENOENT && rbd_dev->parent_overlap) {
2925 /* reverse map this object extent onto the parent */
2926 ret = rbd_obj_calc_img_extents(obj_req, false);
2931 if (obj_req->num_img_extents) {
2932 ret = rbd_obj_read_from_parent(obj_req);
2937 obj_req->read_state = RBD_OBJ_READ_PARENT;
2943 * -ENOENT means a hole in the image -- zero-fill the entire
2944 * length of the request. A short read also implies zero-fill
2945 * to the end of the request.
2947 if (*result == -ENOENT) {
2948 rbd_obj_zero_range(obj_req, 0, obj_req->ex.oe_len);
2950 } else if (*result >= 0) {
2951 if (*result < obj_req->ex.oe_len)
2952 rbd_obj_zero_range(obj_req, *result,
2953 obj_req->ex.oe_len - *result);
2955 rbd_assert(*result == obj_req->ex.oe_len);
2959 case RBD_OBJ_READ_PARENT:
2961 * The parent image is read only up to the overlap -- zero-fill
2962 * from the overlap to the end of the request.
2965 u32 obj_overlap = rbd_obj_img_extents_bytes(obj_req);
2967 if (obj_overlap < obj_req->ex.oe_len)
2968 rbd_obj_zero_range(obj_req, obj_overlap,
2969 obj_req->ex.oe_len - obj_overlap);
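/*
 * Read state machine recap: RBD_OBJ_READ_START issues the object read
 * (skipped if the object map says the object doesn't exist),
 * RBD_OBJ_READ_OBJECT redirects -ENOENT to the parent image within
 * the overlap, and RBD_OBJ_READ_PARENT zero-fills whatever the parent
 * couldn't provide.
 */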
2977 static bool rbd_obj_write_is_noop(struct rbd_obj_request *obj_req)
2979 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2981 if (rbd_object_map_may_exist(rbd_dev, obj_req->ex.oe_objno))
2982 obj_req->flags |= RBD_OBJ_FLAG_MAY_EXIST;
2984 if (!(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST) &&
2985 (obj_req->flags & RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT)) {
2986 dout("%s %p noop for nonexistent\n", __func__, obj_req);
2995 * Return: 0 - object map update sent, 1 - object map update isn't needed, <0 - error
2999 static int rbd_obj_write_pre_object_map(struct rbd_obj_request *obj_req)
3001 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3004 if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
3007 if (obj_req->flags & RBD_OBJ_FLAG_DELETION)
3008 new_state = OBJECT_PENDING;
3010 new_state = OBJECT_EXISTS;
3012 return rbd_object_map_update(obj_req, CEPH_NOSNAP, new_state, NULL);
3015 static int rbd_obj_write_object(struct rbd_obj_request *obj_req)
3017 struct ceph_osd_request *osd_req;
3018 int num_ops = count_write_ops(obj_req);
3022 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED)
3023 num_ops++; /* stat */
3025 osd_req = rbd_obj_add_osd_request(obj_req, num_ops);
3026 if (IS_ERR(osd_req))
3027 return PTR_ERR(osd_req);
3029 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED) {
3030 ret = rbd_osd_setup_stat(osd_req, which++);
3035 rbd_osd_setup_write_ops(osd_req, which);
3036 rbd_osd_format_write(osd_req);
3038 ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
3042 rbd_osd_submit(osd_req);
3047 * copyup_bvecs pages are never highmem pages
3049 static bool is_zero_bvecs(struct bio_vec *bvecs, u32 bytes)
3051 struct ceph_bvec_iter it = { .bvecs = bvecs, .iter = { .bi_size = bytes }, };
3056 ceph_bvec_iter_advance_step(&it, bytes, ({
3057 if (memchr_inv(page_address(bv.bv_page) + bv.bv_offset, 0, bv.bv_len)) return false;
3064 #define MODS_ONLY U32_MAX
3066 static int rbd_obj_copyup_empty_snapc(struct rbd_obj_request *obj_req,
3069 struct ceph_osd_request *osd_req;
3072 dout("%s obj_req %p bytes %u\n", __func__, obj_req, bytes);
3073 rbd_assert(bytes > 0 && bytes != MODS_ONLY);
3075 osd_req = __rbd_obj_add_osd_request(obj_req, &rbd_empty_snapc, 1);
3076 if (IS_ERR(osd_req))
3077 return PTR_ERR(osd_req);
3079 ret = rbd_osd_setup_copyup(osd_req, 0, bytes);
3083 rbd_osd_format_write(osd_req);
3085 ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
3089 rbd_osd_submit(osd_req);
3093 static int rbd_obj_copyup_current_snapc(struct rbd_obj_request *obj_req,
3096 struct ceph_osd_request *osd_req;
3097 int num_ops = count_write_ops(obj_req);
3101 dout("%s obj_req %p bytes %u\n", __func__, obj_req, bytes);
3103 if (bytes != MODS_ONLY)
3104 num_ops++; /* copyup */
3106 osd_req = rbd_obj_add_osd_request(obj_req, num_ops);
3107 if (IS_ERR(osd_req))
3108 return PTR_ERR(osd_req);
3110 if (bytes != MODS_ONLY) {
3111 ret = rbd_osd_setup_copyup(osd_req, which++, bytes);
3116 rbd_osd_setup_write_ops(osd_req, which);
3117 rbd_osd_format_write(osd_req);
3119 ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
3123 rbd_osd_submit(osd_req);
3127 static int setup_copyup_bvecs(struct rbd_obj_request *obj_req, u64 obj_overlap)
3131 rbd_assert(!obj_req->copyup_bvecs);
3132 obj_req->copyup_bvec_count = calc_pages_for(0, obj_overlap);
3133 obj_req->copyup_bvecs = kcalloc(obj_req->copyup_bvec_count,
3134 sizeof(*obj_req->copyup_bvecs), GFP_NOIO);
3136 if (!obj_req->copyup_bvecs)
3139 for (i = 0; i < obj_req->copyup_bvec_count; i++) {
3140 unsigned int len = min(obj_overlap, (u64)PAGE_SIZE);
3142 obj_req->copyup_bvecs[i].bv_page = alloc_page(GFP_NOIO);
3143 if (!obj_req->copyup_bvecs[i].bv_page)
3146 obj_req->copyup_bvecs[i].bv_offset = 0;
3147 obj_req->copyup_bvecs[i].bv_len = len;
3151 rbd_assert(!obj_overlap);
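/*
 * Worked example (illustrative): for a 10000-byte overlap and 4K
 * pages, calc_pages_for(0, 10000) yields three bvecs of 4096, 4096
 * and 1808 bytes, which cover the overlap exactly -- hence the
 * assertion above.
 */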
3156 * The target object doesn't exist. Read the data for the entire
3157 * target object up to the overlap point (if any) from the parent,
3158 * so we can use it for a copyup.
3160 static int rbd_obj_copyup_read_parent(struct rbd_obj_request *obj_req)
3162 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3165 rbd_assert(obj_req->num_img_extents);
3166 prune_extents(obj_req->img_extents, &obj_req->num_img_extents,
3167 rbd_dev->parent_overlap);
3168 if (!obj_req->num_img_extents) {
3170 * The overlap has become 0 (most likely because the
3171 * image has been flattened). Re-submit the original write
3172 * request -- pass MODS_ONLY since the copyup isn't needed anymore.
3175 return rbd_obj_copyup_current_snapc(obj_req, MODS_ONLY);
3178 ret = setup_copyup_bvecs(obj_req, rbd_obj_img_extents_bytes(obj_req));
3182 return rbd_obj_read_from_parent(obj_req);
3185 static void rbd_obj_copyup_object_maps(struct rbd_obj_request *obj_req)
3187 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3188 struct ceph_snap_context *snapc = obj_req->img_request->snapc;
3193 rbd_assert(!obj_req->pending.result && !obj_req->pending.num_pending);
3195 if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
3198 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ZEROS)
3201 for (i = 0; i < snapc->num_snaps; i++) {
3202 if ((rbd_dev->header.features & RBD_FEATURE_FAST_DIFF) &&
3203 i + 1 < snapc->num_snaps)
3204 new_state = OBJECT_EXISTS_CLEAN;
3206 new_state = OBJECT_EXISTS;
3208 ret = rbd_object_map_update(obj_req, snapc->snaps[i], new_state, NULL);
3211 obj_req->pending.result = ret;
3216 obj_req->pending.num_pending++;
3220 static void rbd_obj_copyup_write_object(struct rbd_obj_request *obj_req)
3222 u32 bytes = rbd_obj_img_extents_bytes(obj_req);
3225 rbd_assert(!obj_req->pending.result && !obj_req->pending.num_pending);
3228 * Only send non-zero copyup data to save some I/O and network
3229 * bandwidth -- zero copyup data is equivalent to the object not existing.
3232 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ZEROS)
3235 if (obj_req->img_request->snapc->num_snaps && bytes > 0) {
3237 * Send a copyup request with an empty snapshot context to
3238 * deep-copyup the object through all existing snapshots.
3239 * A second request with the current snapshot context will be
3240 * sent for the actual modification.
3242 ret = rbd_obj_copyup_empty_snapc(obj_req, bytes);
3244 obj_req->pending.result = ret;
3248 obj_req->pending.num_pending++;
3252 ret = rbd_obj_copyup_current_snapc(obj_req, bytes);
3254 obj_req->pending.result = ret;
3258 obj_req->pending.num_pending++;
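/*
 * Copyup recap: the parent data for the overlap is read first; if it
 * turns out to be all zeros the copyup payload is omitted entirely,
 * otherwise snapshot object maps are updated and either one (no
 * snapshots) or two (deep-copyup) copyup requests are sent before the
 * original modification is carried out.
 */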
3261 static bool rbd_obj_advance_copyup(struct rbd_obj_request *obj_req, int *result)
3263 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3267 switch (obj_req->copyup_state) {
3268 case RBD_OBJ_COPYUP_START:
3269 rbd_assert(!*result);
3271 ret = rbd_obj_copyup_read_parent(obj_req);
3276 if (obj_req->num_img_extents)
3277 obj_req->copyup_state = RBD_OBJ_COPYUP_READ_PARENT;
3279 obj_req->copyup_state = RBD_OBJ_COPYUP_WRITE_OBJECT;
3281 case RBD_OBJ_COPYUP_READ_PARENT:
3285 if (is_zero_bvecs(obj_req->copyup_bvecs,
3286 rbd_obj_img_extents_bytes(obj_req))) {
3287 dout("%s %p detected zeros\n", __func__, obj_req);
3288 obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ZEROS;
3291 rbd_obj_copyup_object_maps(obj_req);
3292 if (!obj_req->pending.num_pending) {
3293 *result = obj_req->pending.result;
3294 obj_req->copyup_state = RBD_OBJ_COPYUP_OBJECT_MAPS;
3297 obj_req->copyup_state = __RBD_OBJ_COPYUP_OBJECT_MAPS;
3299 case __RBD_OBJ_COPYUP_OBJECT_MAPS:
3300 if (!pending_result_dec(&obj_req->pending, result))
3303 case RBD_OBJ_COPYUP_OBJECT_MAPS:
3305 rbd_warn(rbd_dev, "snap object map update failed: %d",
3310 rbd_obj_copyup_write_object(obj_req);
3311 if (!obj_req->pending.num_pending) {
3312 *result = obj_req->pending.result;
3313 obj_req->copyup_state = RBD_OBJ_COPYUP_WRITE_OBJECT;
3316 obj_req->copyup_state = __RBD_OBJ_COPYUP_WRITE_OBJECT;
3318 case __RBD_OBJ_COPYUP_WRITE_OBJECT:
3319 if (!pending_result_dec(&obj_req->pending, result))
3322 case RBD_OBJ_COPYUP_WRITE_OBJECT:
3331 * Return: 0 - object map update sent, 1 - object map update isn't needed, <0 - error
3335 static int rbd_obj_write_post_object_map(struct rbd_obj_request *obj_req)
3337 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3338 u8 current_state = OBJECT_PENDING;
3340 if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
3343 if (!(obj_req->flags & RBD_OBJ_FLAG_DELETION))
3346 return rbd_object_map_update(obj_req, CEPH_NOSNAP, OBJECT_NONEXISTENT, &current_state);
3350 static bool rbd_obj_advance_write(struct rbd_obj_request *obj_req, int *result)
3352 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3356 switch (obj_req->write_state) {
3357 case RBD_OBJ_WRITE_START:
3358 rbd_assert(!*result);
3360 if (rbd_obj_write_is_noop(obj_req))
3363 ret = rbd_obj_write_pre_object_map(obj_req);
3368 obj_req->write_state = RBD_OBJ_WRITE_PRE_OBJECT_MAP;
3372 case RBD_OBJ_WRITE_PRE_OBJECT_MAP:
3374 rbd_warn(rbd_dev, "pre object map update failed: %d",
3378 ret = rbd_obj_write_object(obj_req);
3383 obj_req->write_state = RBD_OBJ_WRITE_OBJECT;
3385 case RBD_OBJ_WRITE_OBJECT:
3386 if (*result == -ENOENT) {
3387 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED) {
3389 obj_req->copyup_state = RBD_OBJ_COPYUP_START;
3390 obj_req->write_state = __RBD_OBJ_WRITE_COPYUP;
3394 * On a non-existent object:
3395 * delete - -ENOENT, truncate/zero - 0
3397 if (obj_req->flags & RBD_OBJ_FLAG_DELETION)
3403 obj_req->write_state = RBD_OBJ_WRITE_COPYUP;
3405 case __RBD_OBJ_WRITE_COPYUP:
3406 if (!rbd_obj_advance_copyup(obj_req, result))
3409 case RBD_OBJ_WRITE_COPYUP:
3411 rbd_warn(rbd_dev, "copyup failed: %d", *result);
3414 ret = rbd_obj_write_post_object_map(obj_req);
3419 obj_req->write_state = RBD_OBJ_WRITE_POST_OBJECT_MAP;
3423 case RBD_OBJ_WRITE_POST_OBJECT_MAP:
3425 rbd_warn(rbd_dev, "post object map update failed: %d",
3434 * Return true if @obj_req is completed.
3436 static bool __rbd_obj_handle_request(struct rbd_obj_request *obj_req,
3439 struct rbd_img_request *img_req = obj_req->img_request;
3440 struct rbd_device *rbd_dev = img_req->rbd_dev;
3443 mutex_lock(&obj_req->state_mutex);
3444 if (!rbd_img_is_write(img_req))
3445 done = rbd_obj_advance_read(obj_req, result);
3447 done = rbd_obj_advance_write(obj_req, result);
3448 mutex_unlock(&obj_req->state_mutex);
3450 if (done && *result) {
3451 rbd_assert(*result < 0);
3452 rbd_warn(rbd_dev, "%s at objno %llu %llu~%llu result %d",
3453 obj_op_name(img_req->op_type), obj_req->ex.oe_objno,
3454 obj_req->ex.oe_off, obj_req->ex.oe_len, *result);
3460 * This is open-coded in rbd_img_handle_request() to avoid parent chain recursion.
3463 static void rbd_obj_handle_request(struct rbd_obj_request *obj_req, int result)
3465 if (__rbd_obj_handle_request(obj_req, &result))
3466 rbd_img_handle_request(obj_req->img_request, result);
3469 static bool need_exclusive_lock(struct rbd_img_request *img_req)
3471 struct rbd_device *rbd_dev = img_req->rbd_dev;
3473 if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK))
3476 if (rbd_is_ro(rbd_dev))
3479 rbd_assert(!test_bit(IMG_REQ_CHILD, &img_req->flags));
3480 if (rbd_dev->opts->lock_on_read ||
3481 (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
3484 return rbd_img_is_write(img_req);
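/*
 * In short: the exclusive lock is needed for every write, and also
 * for reads when lock_on_read is set or the image has an object map
 * (the object map may only be modified by the lock owner).
 */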
3487 static bool rbd_lock_add_request(struct rbd_img_request *img_req)
3489 struct rbd_device *rbd_dev = img_req->rbd_dev;
3492 lockdep_assert_held(&rbd_dev->lock_rwsem);
3493 locked = rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED;
3494 spin_lock(&rbd_dev->lock_lists_lock);
3495 rbd_assert(list_empty(&img_req->lock_item));
3497 list_add_tail(&img_req->lock_item, &rbd_dev->acquiring_list);
3499 list_add_tail(&img_req->lock_item, &rbd_dev->running_list);
3500 spin_unlock(&rbd_dev->lock_lists_lock);
3504 static void rbd_lock_del_request(struct rbd_img_request *img_req)
3506 struct rbd_device *rbd_dev = img_req->rbd_dev;
3509 lockdep_assert_held(&rbd_dev->lock_rwsem);
3510 spin_lock(&rbd_dev->lock_lists_lock);
3511 rbd_assert(!list_empty(&img_req->lock_item));
3512 list_del_init(&img_req->lock_item);
3513 need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING &&
3514 list_empty(&rbd_dev->running_list));
3515 spin_unlock(&rbd_dev->lock_lists_lock);
3517 complete(&rbd_dev->releasing_wait);
3520 static int rbd_img_exclusive_lock(struct rbd_img_request *img_req)
3522 struct rbd_device *rbd_dev = img_req->rbd_dev;
3524 if (!need_exclusive_lock(img_req))
3527 if (rbd_lock_add_request(img_req))
3530 if (rbd_dev->opts->exclusive) {
3531 WARN_ON(1); /* lock got released? */
3536 * Note the use of mod_delayed_work() in rbd_acquire_lock()
3537 * and cancel_delayed_work() in wake_lock_waiters().
3539 dout("%s rbd_dev %p queueing lock_dwork\n", __func__, rbd_dev);
3540 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
3544 static void rbd_img_object_requests(struct rbd_img_request *img_req)
3546 struct rbd_obj_request *obj_req;
3548 rbd_assert(!img_req->pending.result && !img_req->pending.num_pending);
3550 for_each_obj_request(img_req, obj_req) {
3553 if (__rbd_obj_handle_request(obj_req, &result)) {
3555 img_req->pending.result = result;
3559 img_req->pending.num_pending++;
3564 static bool rbd_img_advance(struct rbd_img_request *img_req, int *result)
3566 struct rbd_device *rbd_dev = img_req->rbd_dev;
3570 switch (img_req->state) {
3572 rbd_assert(!*result);
3574 ret = rbd_img_exclusive_lock(img_req);
3579 img_req->state = RBD_IMG_EXCLUSIVE_LOCK;
3583 case RBD_IMG_EXCLUSIVE_LOCK:
3587 rbd_assert(!need_exclusive_lock(img_req) ||
3588 __rbd_is_lock_owner(rbd_dev));
3590 rbd_img_object_requests(img_req);
3591 if (!img_req->pending.num_pending) {
3592 *result = img_req->pending.result;
3593 img_req->state = RBD_IMG_OBJECT_REQUESTS;
3596 img_req->state = __RBD_IMG_OBJECT_REQUESTS;
3598 case __RBD_IMG_OBJECT_REQUESTS:
3599 if (!pending_result_dec(&img_req->pending, result))
3602 case RBD_IMG_OBJECT_REQUESTS:
3610 * Return true if @img_req is completed.
3612 static bool __rbd_img_handle_request(struct rbd_img_request *img_req,
3615 struct rbd_device *rbd_dev = img_req->rbd_dev;
3618 if (need_exclusive_lock(img_req)) {
3619 down_read(&rbd_dev->lock_rwsem);
3620 mutex_lock(&img_req->state_mutex);
3621 done = rbd_img_advance(img_req, result);
3623 rbd_lock_del_request(img_req);
3624 mutex_unlock(&img_req->state_mutex);
3625 up_read(&rbd_dev->lock_rwsem);
3627 mutex_lock(&img_req->state_mutex);
3628 done = rbd_img_advance(img_req, result);
3629 mutex_unlock(&img_req->state_mutex);
3632 if (done && *result) {
3633 rbd_assert(*result < 0);
3634 rbd_warn(rbd_dev, "%s%s result %d",
3635 test_bit(IMG_REQ_CHILD, &img_req->flags) ? "child " : "",
3636 obj_op_name(img_req->op_type), *result);
3641 static void rbd_img_handle_request(struct rbd_img_request *img_req, int result)
3644 if (!__rbd_img_handle_request(img_req, &result))
3647 if (test_bit(IMG_REQ_CHILD, &img_req->flags)) {
3648 struct rbd_obj_request *obj_req = img_req->obj_request;
3650 rbd_img_request_put(img_req);
3651 if (__rbd_obj_handle_request(obj_req, &result)) {
3652 img_req = obj_req->img_request;
3656 struct request *rq = img_req->rq;
3658 rbd_img_request_put(img_req);
3659 blk_mq_end_request(rq, errno_to_blk_status(result));
3663 static const struct rbd_client_id rbd_empty_cid;
3665 static bool rbd_cid_equal(const struct rbd_client_id *lhs,
3666 const struct rbd_client_id *rhs)
3668 return lhs->gid == rhs->gid && lhs->handle == rhs->handle;
3671 static struct rbd_client_id rbd_get_cid(struct rbd_device *rbd_dev)
3673 struct rbd_client_id cid;
3675 mutex_lock(&rbd_dev->watch_mutex);
3676 cid.gid = ceph_client_gid(rbd_dev->rbd_client->client);
3677 cid.handle = rbd_dev->watch_cookie;
3678 mutex_unlock(&rbd_dev->watch_mutex);
3683 * lock_rwsem must be held for write
3685 static void rbd_set_owner_cid(struct rbd_device *rbd_dev,
3686 const struct rbd_client_id *cid)
3688 dout("%s rbd_dev %p %llu-%llu -> %llu-%llu\n", __func__, rbd_dev,
3689 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle,
3690 cid->gid, cid->handle);
3691 rbd_dev->owner_cid = *cid; /* struct */
3694 static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf)
3696 mutex_lock(&rbd_dev->watch_mutex);
3697 sprintf(buf, "%s %llu", RBD_LOCK_COOKIE_PREFIX, rbd_dev->watch_cookie);
3698 mutex_unlock(&rbd_dev->watch_mutex);
3701 static void __rbd_lock(struct rbd_device *rbd_dev, const char *cookie)
3703 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
3705 rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED;
3706 strcpy(rbd_dev->lock_cookie, cookie);
3707 rbd_set_owner_cid(rbd_dev, &cid);
3708 queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
3712 * lock_rwsem must be held for write
3714 static int rbd_lock(struct rbd_device *rbd_dev)
3716 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3720 WARN_ON(__rbd_is_lock_owner(rbd_dev) ||
3721 rbd_dev->lock_cookie[0] != '\0');
3723 format_lock_cookie(rbd_dev, cookie);
3724 ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
3725 RBD_LOCK_NAME, CEPH_CLS_LOCK_EXCLUSIVE, cookie,
3726 RBD_LOCK_TAG, "", 0);
3730 __rbd_lock(rbd_dev, cookie);
3735 * lock_rwsem must be held for write
3737 static void rbd_unlock(struct rbd_device *rbd_dev)
3739 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3742 WARN_ON(!__rbd_is_lock_owner(rbd_dev) ||
3743 rbd_dev->lock_cookie[0] == '\0');
3745 ret = ceph_cls_unlock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
3746 RBD_LOCK_NAME, rbd_dev->lock_cookie);
3747 if (ret && ret != -ENOENT)
3748 rbd_warn(rbd_dev, "failed to unlock header: %d", ret);
3750 /* treat errors as the image is unlocked */
3751 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
3752 rbd_dev->lock_cookie[0] = '\0';
3753 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
3754 queue_work(rbd_dev->task_wq, &rbd_dev->released_lock_work);
3757 static int __rbd_notify_op_lock(struct rbd_device *rbd_dev,
3758 enum rbd_notify_op notify_op,
3759 struct page ***preply_pages,
3762 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3763 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
3764 char buf[4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN];
3765 int buf_size = sizeof(buf);
3768 dout("%s rbd_dev %p notify_op %d\n", __func__, rbd_dev, notify_op);
3770 /* encode *LockPayload NotifyMessage (op + ClientId) */
3771 ceph_start_encoding(&p, 2, 1, buf_size - CEPH_ENCODING_START_BLK_LEN);
3772 ceph_encode_32(&p, notify_op);
3773 ceph_encode_64(&p, cid.gid);
3774 ceph_encode_64(&p, cid.handle);
3776 return ceph_osdc_notify(osdc, &rbd_dev->header_oid,
3777 &rbd_dev->header_oloc, buf, buf_size,
3778 RBD_NOTIFY_TIMEOUT, preply_pages, preply_len);
3781 static void rbd_notify_op_lock(struct rbd_device *rbd_dev,
3782 enum rbd_notify_op notify_op)
3784 struct page **reply_pages;
3787 __rbd_notify_op_lock(rbd_dev, notify_op, &reply_pages, &reply_len);
3788 ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
3791 static void rbd_notify_acquired_lock(struct work_struct *work)
3793 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
3794 acquired_lock_work);
3796 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_ACQUIRED_LOCK);
3799 static void rbd_notify_released_lock(struct work_struct *work)
3801 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
3802 released_lock_work);
3804 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_RELEASED_LOCK);
3807 static int rbd_request_lock(struct rbd_device *rbd_dev)
3809 struct page **reply_pages;
3811 bool lock_owner_responded = false;
3814 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3816 ret = __rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_REQUEST_LOCK,
3817 &reply_pages, &reply_len);
3818 if (ret && ret != -ETIMEDOUT) {
3819 rbd_warn(rbd_dev, "failed to request lock: %d", ret);
3823 if (reply_len > 0 && reply_len <= PAGE_SIZE) {
3824 void *p = page_address(reply_pages[0]);
3825 void *const end = p + reply_len;
3828 ceph_decode_32_safe(&p, end, n, e_inval); /* num_acks */
3833 ceph_decode_need(&p, end, 8 + 8, e_inval);
3834 p += 8 + 8; /* skip gid and cookie */
3836 ceph_decode_32_safe(&p, end, len, e_inval);
3840 if (lock_owner_responded) {
3842 "duplicate lock owners detected");
3847 lock_owner_responded = true;
3848 ret = ceph_start_decoding(&p, end, 1, "ResponseMessage", &struct_v, &len);
3852 "failed to decode ResponseMessage: %d",
3857 ret = ceph_decode_32(&p);
3861 if (!lock_owner_responded) {
3862 rbd_warn(rbd_dev, "no lock owners detected");
3867 ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
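/*
 * Protocol note: REQUEST_LOCK is broadcast to all watchers, but only
 * the current lock owner is expected to answer with a ResponseMessage.
 * 0 means the owner will release the lock; a negative value (e.g.
 * -EROFS from an owner mapped with the exclusive option, see
 * rbd_handle_request_lock()) means it refuses to.
 */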
3876 * Either image request state machine(s) or rbd_add_acquire_lock() (i.e. "rbd map").
3879 static void wake_lock_waiters(struct rbd_device *rbd_dev, int result)
3881 struct rbd_img_request *img_req;
3883 dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
3884 lockdep_assert_held_write(&rbd_dev->lock_rwsem);
3886 cancel_delayed_work(&rbd_dev->lock_dwork);
3887 if (!completion_done(&rbd_dev->acquire_wait)) {
3888 rbd_assert(list_empty(&rbd_dev->acquiring_list) &&
3889 list_empty(&rbd_dev->running_list));
3890 rbd_dev->acquire_err = result;
3891 complete_all(&rbd_dev->acquire_wait);
3895 list_for_each_entry(img_req, &rbd_dev->acquiring_list, lock_item) {
3896 mutex_lock(&img_req->state_mutex);
3897 rbd_assert(img_req->state == RBD_IMG_EXCLUSIVE_LOCK);
3898 rbd_img_schedule(img_req, result);
3899 mutex_unlock(&img_req->state_mutex);
3902 list_splice_tail_init(&rbd_dev->acquiring_list, &rbd_dev->running_list);
3905 static int get_lock_owner_info(struct rbd_device *rbd_dev,
3906 struct ceph_locker **lockers, u32 *num_lockers)
3908 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3913 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3915 ret = ceph_cls_lock_info(osdc, &rbd_dev->header_oid,
3916 &rbd_dev->header_oloc, RBD_LOCK_NAME,
3917 &lock_type, &lock_tag, lockers, num_lockers);
3921 if (*num_lockers == 0) {
3922 dout("%s rbd_dev %p no lockers detected\n", __func__, rbd_dev);
3926 if (strcmp(lock_tag, RBD_LOCK_TAG)) {
3927 rbd_warn(rbd_dev, "locked by external mechanism, tag %s",
3933 if (lock_type == CEPH_CLS_LOCK_SHARED) {
3934 rbd_warn(rbd_dev, "shared lock type detected");
3939 if (strncmp((*lockers)[0].id.cookie, RBD_LOCK_COOKIE_PREFIX,
3940 strlen(RBD_LOCK_COOKIE_PREFIX))) {
3941 rbd_warn(rbd_dev, "locked by external mechanism, cookie %s",
3942 (*lockers)[0].id.cookie);
3952 static int find_watcher(struct rbd_device *rbd_dev,
3953 const struct ceph_locker *locker)
3955 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3956 struct ceph_watch_item *watchers;
3962 ret = ceph_osdc_list_watchers(osdc, &rbd_dev->header_oid,
3963 &rbd_dev->header_oloc, &watchers, &num_watchers);
3968 sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie);
3969 for (i = 0; i < num_watchers; i++) {
3970 if (!memcmp(&watchers[i].addr, &locker->info.addr,
3971 sizeof(locker->info.addr)) &&
3972 watchers[i].cookie == cookie) {
3973 struct rbd_client_id cid = {
3974 .gid = le64_to_cpu(watchers[i].name.num), .handle = cookie, };
3978 dout("%s rbd_dev %p found cid %llu-%llu\n", __func__,
3979 rbd_dev, cid.gid, cid.handle);
3980 rbd_set_owner_cid(rbd_dev, &cid);
3986 dout("%s rbd_dev %p no watchers\n", __func__, rbd_dev);
3994 * lock_rwsem must be held for write
3996 static int rbd_try_lock(struct rbd_device *rbd_dev)
3998 struct ceph_client *client = rbd_dev->rbd_client->client;
3999 struct ceph_locker *lockers;
4004 ret = rbd_lock(rbd_dev);
4008 /* determine if the current lock holder is still alive */
4009 ret = get_lock_owner_info(rbd_dev, &lockers, &num_lockers);
4013 if (num_lockers == 0)
4016 ret = find_watcher(rbd_dev, lockers);
4018 goto out; /* request lock or error */
4020 rbd_warn(rbd_dev, "breaking header lock owned by %s%llu",
4021 ENTITY_NAME(lockers[0].id.name));
4023 ret = ceph_monc_blacklist_add(&client->monc,
4024 &lockers[0].info.addr);
4026 rbd_warn(rbd_dev, "blacklist of %s%llu failed: %d",
4027 ENTITY_NAME(lockers[0].id.name), ret);
4031 ret = ceph_cls_break_lock(&client->osdc, &rbd_dev->header_oid,
4032 &rbd_dev->header_oloc, RBD_LOCK_NAME,
4033 lockers[0].id.cookie,
4034 &lockers[0].id.name);
4035 if (ret && ret != -ENOENT)
4039 ceph_free_lockers(lockers, num_lockers);
4043 ceph_free_lockers(lockers, num_lockers);
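/*
 * Lock-breaking recap: if taking the lock races with another client,
 * look the owner up among the header watchers.  An owner with a live
 * watch is asked to release via REQUEST_LOCK; an owner without one is
 * presumed dead, so it is blacklisted and its lock is broken before
 * retrying.
 */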
4047 static int rbd_post_acquire_action(struct rbd_device *rbd_dev)
4051 if (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP) {
4052 ret = rbd_object_map_open(rbd_dev);
4063 * Return: 0 - lock acquired, 1 - caller should call rbd_request_lock(), <0 - error
4066 static int rbd_try_acquire_lock(struct rbd_device *rbd_dev)
4070 down_read(&rbd_dev->lock_rwsem);
4071 dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
4072 rbd_dev->lock_state);
4073 if (__rbd_is_lock_owner(rbd_dev)) {
4074 up_read(&rbd_dev->lock_rwsem);
4078 up_read(&rbd_dev->lock_rwsem);
4079 down_write(&rbd_dev->lock_rwsem);
4080 dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
4081 rbd_dev->lock_state);
4082 if (__rbd_is_lock_owner(rbd_dev)) {
4083 up_write(&rbd_dev->lock_rwsem);
4087 ret = rbd_try_lock(rbd_dev);
4089 rbd_warn(rbd_dev, "failed to lock header: %d", ret);
4090 if (ret == -EBLACKLISTED)
4093 ret = 1; /* request lock anyway */
4096 up_write(&rbd_dev->lock_rwsem);
4100 rbd_assert(rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED);
4101 rbd_assert(list_empty(&rbd_dev->running_list));
4103 ret = rbd_post_acquire_action(rbd_dev);
4105 rbd_warn(rbd_dev, "post-acquire action failed: %d", ret);
4107 * Can't stay in RBD_LOCK_STATE_LOCKED because
4108 * rbd_lock_add_request() would let the request through,
4109 * assuming that e.g. object map is locked and loaded.
4111 rbd_unlock(rbd_dev);
4115 wake_lock_waiters(rbd_dev, ret);
4116 up_write(&rbd_dev->lock_rwsem);
4120 static void rbd_acquire_lock(struct work_struct *work)
4122 struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
4123 struct rbd_device, lock_dwork);
4126 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4128 ret = rbd_try_acquire_lock(rbd_dev);
4130 dout("%s rbd_dev %p ret %d - done\n", __func__, rbd_dev, ret);
4134 ret = rbd_request_lock(rbd_dev);
4135 if (ret == -ETIMEDOUT) {
4136 goto again; /* treat this as a dead client */
4137 } else if (ret == -EROFS) {
4138 rbd_warn(rbd_dev, "peer will not release lock");
4139 down_write(&rbd_dev->lock_rwsem);
4140 wake_lock_waiters(rbd_dev, ret);
4141 up_write(&rbd_dev->lock_rwsem);
4142 } else if (ret < 0) {
4143 rbd_warn(rbd_dev, "error requesting lock: %d", ret);
4144 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, RBD_RETRY_DELAY);
4148 * lock owner acked, but resend if we don't see them release the lock
4151 dout("%s rbd_dev %p requeuing lock_dwork\n", __func__,
4153 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
4154 msecs_to_jiffies(2 * RBD_NOTIFY_TIMEOUT * MSEC_PER_SEC));
4158 static bool rbd_quiesce_lock(struct rbd_device *rbd_dev)
4162 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4163 lockdep_assert_held_write(&rbd_dev->lock_rwsem);
4165 if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED)
4169 * Ensure that all in-flight IO is flushed.
4171 rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING;
4172 rbd_assert(!completion_done(&rbd_dev->releasing_wait));
4173 need_wait = !list_empty(&rbd_dev->running_list);
4174 downgrade_write(&rbd_dev->lock_rwsem);
4176 wait_for_completion(&rbd_dev->releasing_wait);
4177 up_read(&rbd_dev->lock_rwsem);
4179 down_write(&rbd_dev->lock_rwsem);
4180 if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING)
4183 rbd_assert(list_empty(&rbd_dev->running_list));
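/*
 * Quiesce handshake: once RBD_LOCK_STATE_RELEASING is set,
 * rbd_lock_del_request() completes releasing_wait when the last
 * request drops off running_list, so the wait above returns only
 * after all in-flight I/O has drained.
 */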
4187 static void rbd_pre_release_action(struct rbd_device *rbd_dev)
4189 if (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP)
4190 rbd_object_map_close(rbd_dev);
4193 static void __rbd_release_lock(struct rbd_device *rbd_dev)
4195 rbd_assert(list_empty(&rbd_dev->running_list));
4197 rbd_pre_release_action(rbd_dev);
4198 rbd_unlock(rbd_dev);
4202 * lock_rwsem must be held for write
4204 static void rbd_release_lock(struct rbd_device *rbd_dev)
4206 if (!rbd_quiesce_lock(rbd_dev))
4209 __rbd_release_lock(rbd_dev);
4212 * Give others a chance to grab the lock - we would re-acquire
4213 * almost immediately if we got new IO while draining the running
4214 * list otherwise. We need to ack our own notifications, so this
4215 * lock_dwork will be requeued from rbd_handle_released_lock() by
4216 * way of maybe_kick_acquire().
4218 cancel_delayed_work(&rbd_dev->lock_dwork);
4221 static void rbd_release_lock_work(struct work_struct *work)
4223 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
4226 down_write(&rbd_dev->lock_rwsem);
4227 rbd_release_lock(rbd_dev);
4228 up_write(&rbd_dev->lock_rwsem);
4231 static void maybe_kick_acquire(struct rbd_device *rbd_dev)
4235 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4236 if (__rbd_is_lock_owner(rbd_dev))
4239 spin_lock(&rbd_dev->lock_lists_lock);
4240 have_requests = !list_empty(&rbd_dev->acquiring_list);
4241 spin_unlock(&rbd_dev->lock_lists_lock);
4242 if (have_requests || delayed_work_pending(&rbd_dev->lock_dwork)) {
4243 dout("%s rbd_dev %p kicking lock_dwork\n", __func__, rbd_dev);
4244 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
4248 static void rbd_handle_acquired_lock(struct rbd_device *rbd_dev, u8 struct_v,
4251 struct rbd_client_id cid = { 0 };
4253 if (struct_v >= 2) {
4254 cid.gid = ceph_decode_64(p);
4255 cid.handle = ceph_decode_64(p);
4258 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
4260 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
4261 down_write(&rbd_dev->lock_rwsem);
4262 if (rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
4264 * we already know that the remote client is the owner
4267 up_write(&rbd_dev->lock_rwsem);
4271 rbd_set_owner_cid(rbd_dev, &cid);
4272 downgrade_write(&rbd_dev->lock_rwsem);
4274 down_read(&rbd_dev->lock_rwsem);
4277 maybe_kick_acquire(rbd_dev);
4278 up_read(&rbd_dev->lock_rwsem);
4281 static void rbd_handle_released_lock(struct rbd_device *rbd_dev, u8 struct_v,
4284 struct rbd_client_id cid = { 0 };
4286 if (struct_v >= 2) {
4287 cid.gid = ceph_decode_64(p);
4288 cid.handle = ceph_decode_64(p);
4291 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
4293 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
4294 down_write(&rbd_dev->lock_rwsem);
4295 if (!rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
4296 dout("%s rbd_dev %p unexpected owner, cid %llu-%llu != owner_cid %llu-%llu\n",
4297 __func__, rbd_dev, cid.gid, cid.handle,
4298 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle);
4299 up_write(&rbd_dev->lock_rwsem);
4303 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
4304 downgrade_write(&rbd_dev->lock_rwsem);
4306 down_read(&rbd_dev->lock_rwsem);
4309 maybe_kick_acquire(rbd_dev);
4310 up_read(&rbd_dev->lock_rwsem);
4314 * Returns result for ResponseMessage to be encoded (<= 0), or 1 if no
4315 * ResponseMessage is needed.
4317 static int rbd_handle_request_lock(struct rbd_device *rbd_dev, u8 struct_v,
4320 struct rbd_client_id my_cid = rbd_get_cid(rbd_dev);
4321 struct rbd_client_id cid = { 0 };
4324 if (struct_v >= 2) {
4325 cid.gid = ceph_decode_64(p);
4326 cid.handle = ceph_decode_64(p);
4329 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
4331 if (rbd_cid_equal(&cid, &my_cid))
4334 down_read(&rbd_dev->lock_rwsem);
4335 if (__rbd_is_lock_owner(rbd_dev)) {
4336 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED &&
4337 rbd_cid_equal(&rbd_dev->owner_cid, &rbd_empty_cid))
4341 * encode ResponseMessage(0) so the peer can detect a missing owner
4346 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) {
4347 if (!rbd_dev->opts->exclusive) {
4348 dout("%s rbd_dev %p queueing unlock_work\n",
4350 queue_work(rbd_dev->task_wq,
4351 &rbd_dev->unlock_work);
4353 /* refuse to release the lock */
4360 up_read(&rbd_dev->lock_rwsem);
4364 static void __rbd_acknowledge_notify(struct rbd_device *rbd_dev,
4365 u64 notify_id, u64 cookie, s32 *result)
4367 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4368 char buf[4 + CEPH_ENCODING_START_BLK_LEN];
4369 int buf_size = sizeof(buf);
4375 /* encode ResponseMessage */
4376 ceph_start_encoding(&p, 1, 1,
4377 buf_size - CEPH_ENCODING_START_BLK_LEN);
4378 ceph_encode_32(&p, *result);
4383 ret = ceph_osdc_notify_ack(osdc, &rbd_dev->header_oid,
4384 &rbd_dev->header_oloc, notify_id, cookie, buf, buf_size);
4387 rbd_warn(rbd_dev, "acknowledge_notify failed: %d", ret);
4390 static void rbd_acknowledge_notify(struct rbd_device *rbd_dev, u64 notify_id,
4393 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4394 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, NULL);
4397 static void rbd_acknowledge_notify_result(struct rbd_device *rbd_dev,
4398 u64 notify_id, u64 cookie, s32 result)
4400 dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
4401 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, &result);
4404 static void rbd_watch_cb(void *arg, u64 notify_id, u64 cookie,
4405 u64 notifier_id, void *data, size_t data_len)
4407 struct rbd_device *rbd_dev = arg;
4409 void *const end = p + data_len;
4415 dout("%s rbd_dev %p cookie %llu notify_id %llu data_len %zu\n",
4416 __func__, rbd_dev, cookie, notify_id, data_len);
4418 ret = ceph_start_decoding(&p, end, 1, "NotifyMessage",
4421 rbd_warn(rbd_dev, "failed to decode NotifyMessage: %d",
4426 notify_op = ceph_decode_32(&p);
4428 /* legacy notification for header updates */
4429 notify_op = RBD_NOTIFY_OP_HEADER_UPDATE;
4433 dout("%s rbd_dev %p notify_op %u\n", __func__, rbd_dev, notify_op);
4434 switch (notify_op) {
4435 case RBD_NOTIFY_OP_ACQUIRED_LOCK:
4436 rbd_handle_acquired_lock(rbd_dev, struct_v, &p);
4437 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4439 case RBD_NOTIFY_OP_RELEASED_LOCK:
4440 rbd_handle_released_lock(rbd_dev, struct_v, &p);
4441 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4443 case RBD_NOTIFY_OP_REQUEST_LOCK:
4444 ret = rbd_handle_request_lock(rbd_dev, struct_v, &p);
4446 rbd_acknowledge_notify_result(rbd_dev, notify_id, cookie, ret);
4449 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4451 case RBD_NOTIFY_OP_HEADER_UPDATE:
4452 ret = rbd_dev_refresh(rbd_dev);
4454 rbd_warn(rbd_dev, "refresh failed: %d", ret);
4456 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4459 if (rbd_is_lock_owner(rbd_dev))
4460 rbd_acknowledge_notify_result(rbd_dev, notify_id,
4461 cookie, -EOPNOTSUPP);
4463 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4468 static void __rbd_unregister_watch(struct rbd_device *rbd_dev);
4470 static void rbd_watch_errcb(void *arg, u64 cookie, int err)
4472 struct rbd_device *rbd_dev = arg;
4474 rbd_warn(rbd_dev, "encountered watch error: %d", err);
4476 down_write(&rbd_dev->lock_rwsem);
4477 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
4478 up_write(&rbd_dev->lock_rwsem);
4480 mutex_lock(&rbd_dev->watch_mutex);
4481 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED) {
4482 __rbd_unregister_watch(rbd_dev);
4483 rbd_dev->watch_state = RBD_WATCH_STATE_ERROR;
4485 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->watch_dwork, 0);
4487 mutex_unlock(&rbd_dev->watch_mutex);
4491 * watch_mutex must be locked
4493 static int __rbd_register_watch(struct rbd_device *rbd_dev)
4495 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4496 struct ceph_osd_linger_request *handle;
4498 rbd_assert(!rbd_dev->watch_handle);
4499 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4501 handle = ceph_osdc_watch(osdc, &rbd_dev->header_oid,
4502 &rbd_dev->header_oloc, rbd_watch_cb,
4503 rbd_watch_errcb, rbd_dev);
4505 return PTR_ERR(handle);
4507 rbd_dev->watch_handle = handle;
4512 * watch_mutex must be locked
4514 static void __rbd_unregister_watch(struct rbd_device *rbd_dev)
4516 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4519 rbd_assert(rbd_dev->watch_handle);
4520 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4522 ret = ceph_osdc_unwatch(osdc, rbd_dev->watch_handle);
4524 rbd_warn(rbd_dev, "failed to unwatch: %d", ret);
4526 rbd_dev->watch_handle = NULL;
4529 static int rbd_register_watch(struct rbd_device *rbd_dev)
4533 mutex_lock(&rbd_dev->watch_mutex);
4534 rbd_assert(rbd_dev->watch_state == RBD_WATCH_STATE_UNREGISTERED);
4535 ret = __rbd_register_watch(rbd_dev);
4539 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
4540 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
4543 mutex_unlock(&rbd_dev->watch_mutex);
4547 static void cancel_tasks_sync(struct rbd_device *rbd_dev)
4549 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4551 cancel_work_sync(&rbd_dev->acquired_lock_work);
4552 cancel_work_sync(&rbd_dev->released_lock_work);
4553 cancel_delayed_work_sync(&rbd_dev->lock_dwork);
4554 cancel_work_sync(&rbd_dev->unlock_work);
4557 static void rbd_unregister_watch(struct rbd_device *rbd_dev)
4559 cancel_tasks_sync(rbd_dev);
4561 mutex_lock(&rbd_dev->watch_mutex);
4562 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED)
4563 __rbd_unregister_watch(rbd_dev);
4564 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
4565 mutex_unlock(&rbd_dev->watch_mutex);
4567 cancel_delayed_work_sync(&rbd_dev->watch_dwork);
4568 ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
4572 * lock_rwsem must be held for write
4574 static void rbd_reacquire_lock(struct rbd_device *rbd_dev)
4576 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4580 if (!rbd_quiesce_lock(rbd_dev))
4583 format_lock_cookie(rbd_dev, cookie);
4584 ret = ceph_cls_set_cookie(osdc, &rbd_dev->header_oid,
4585 &rbd_dev->header_oloc, RBD_LOCK_NAME,
4586 CEPH_CLS_LOCK_EXCLUSIVE, rbd_dev->lock_cookie,
4587 RBD_LOCK_TAG, cookie);
4589 if (ret != -EOPNOTSUPP)
4590 rbd_warn(rbd_dev, "failed to update lock cookie: %d",
4594 * Lock cookie cannot be updated on older OSDs, so do
4595 * a manual release and queue an acquire.
4597 __rbd_release_lock(rbd_dev);
4598 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
4600 __rbd_lock(rbd_dev, cookie);
4601 wake_lock_waiters(rbd_dev, 0);
4605 static void rbd_reregister_watch(struct work_struct *work)
4607 struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
4608 struct rbd_device, watch_dwork);
4611 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4613 mutex_lock(&rbd_dev->watch_mutex);
4614 if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR) {
4615 mutex_unlock(&rbd_dev->watch_mutex);
4619 ret = __rbd_register_watch(rbd_dev);
4621 rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
4622 if (ret != -EBLACKLISTED && ret != -ENOENT) {
4623 queue_delayed_work(rbd_dev->task_wq,
4624 &rbd_dev->watch_dwork, RBD_RETRY_DELAY);
4626 mutex_unlock(&rbd_dev->watch_mutex);
4630 mutex_unlock(&rbd_dev->watch_mutex);
4631 down_write(&rbd_dev->lock_rwsem);
4632 wake_lock_waiters(rbd_dev, ret);
4633 up_write(&rbd_dev->lock_rwsem);
4637 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
4638 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
4639 mutex_unlock(&rbd_dev->watch_mutex);
4641 down_write(&rbd_dev->lock_rwsem);
4642 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED)
4643 rbd_reacquire_lock(rbd_dev);
4644 up_write(&rbd_dev->lock_rwsem);
4646 ret = rbd_dev_refresh(rbd_dev);
4648 rbd_warn(rbd_dev, "reregistration refresh failed: %d", ret);
4652 * Synchronous osd object method call. Returns the number of bytes
4653 * returned in the outbound buffer, or a negative error code.
4655 static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
4656 struct ceph_object_id *oid,
4657 struct ceph_object_locator *oloc,
4658 const char *method_name,
4659 const void *outbound,
4660 size_t outbound_size,
4662 size_t inbound_size)
4664 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4665 struct page *req_page = NULL;
4666 struct page *reply_page;
4670 * Method calls are ultimately read operations. The result
4671 * should be placed into the inbound buffer provided. They
4672 * also supply outbound data--parameters for the object
4673 * method. Currently if this is present it will be a snapshot id.
4677 if (outbound_size > PAGE_SIZE)
4680 req_page = alloc_page(GFP_KERNEL);
4684 memcpy(page_address(req_page), outbound, outbound_size);
4687 reply_page = alloc_page(GFP_KERNEL);
4690 __free_page(req_page);
4694 ret = ceph_osdc_call(osdc, oid, oloc, RBD_DRV_NAME, method_name,
4695 CEPH_OSD_FLAG_READ, req_page, outbound_size,
4696 &reply_page, &inbound_size);
4698 memcpy(inbound, page_address(reply_page), inbound_size);
4703 __free_page(req_page);
4704 __free_page(reply_page);
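/*
 * Illustrative call, modeled on the "get_size" user in the v2 image
 * probing code elsewhere in this file (a sketch, not a new caller):
 *
 *	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
 *				  &rbd_dev->header_oloc, "get_size",
 *				  &snapid, sizeof(snapid),
 *				  &size_buf, sizeof(size_buf));
 *
 * i.e. at most one page of outbound parameters and a caller-supplied
 * inbound buffer for the method's reply.
 */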
4708 static void rbd_queue_workfn(struct work_struct *work)
4710 struct request *rq = blk_mq_rq_from_pdu(work);
4711 struct rbd_device *rbd_dev = rq->q->queuedata;
4712 struct rbd_img_request *img_request;
4713 struct ceph_snap_context *snapc = NULL;
4714 u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
4715 u64 length = blk_rq_bytes(rq);
4716 enum obj_operation_type op_type;
4720 switch (req_op(rq)) {
4721 case REQ_OP_DISCARD:
4722 op_type = OBJ_OP_DISCARD;
4724 case REQ_OP_WRITE_ZEROES:
4725 op_type = OBJ_OP_ZEROOUT;
4728 op_type = OBJ_OP_WRITE;
4731 op_type = OBJ_OP_READ;
4734 dout("%s: non-fs request type %d\n", __func__, req_op(rq));
4739 /* Ignore/skip any zero-length requests */
4742 dout("%s: zero-length request\n", __func__);
4747 if (op_type != OBJ_OP_READ) {
4748 if (rbd_is_ro(rbd_dev)) {
4749 rbd_warn(rbd_dev, "%s on read-only mapping",
4750 obj_op_name(op_type));
4754 rbd_assert(!rbd_is_snap(rbd_dev));
4757 if (offset && length > U64_MAX - offset + 1) {
4758 rbd_warn(rbd_dev, "bad request range (%llu~%llu)", offset,
4761 goto err_rq; /* Shouldn't happen */
4764 blk_mq_start_request(rq);
4766 down_read(&rbd_dev->header_rwsem);
4767 mapping_size = rbd_dev->mapping.size;
4768 if (op_type != OBJ_OP_READ) {
4769 snapc = rbd_dev->header.snapc;
4770 ceph_get_snap_context(snapc);
4772 up_read(&rbd_dev->header_rwsem);
4774 if (offset + length > mapping_size) {
4775 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
4776 length, mapping_size);
4781 img_request = rbd_img_request_create(rbd_dev, op_type, snapc);
4786 img_request->rq = rq;
4787 snapc = NULL; /* img_request consumes a ref */
4789 dout("%s rbd_dev %p img_req %p %s %llu~%llu\n", __func__, rbd_dev,
4790 img_request, obj_op_name(op_type), offset, length);
4792 if (op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_ZEROOUT)
4793 result = rbd_img_fill_nodata(img_request, offset, length);
4795 result = rbd_img_fill_from_bio(img_request, offset, length, rq->bio);
4798 goto err_img_request;
4800 rbd_img_handle_request(img_request, 0);
4804 rbd_img_request_put(img_request);
4807 rbd_warn(rbd_dev, "%s %llx at %llx result %d",
4808 obj_op_name(op_type), length, offset, result);
4809 ceph_put_snap_context(snapc);
4811 blk_mq_end_request(rq, errno_to_blk_status(result));
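/*
 * Request path recap: rbd_queue_rq() punts each block request to this
 * worker, which validates it, snapshots the mapping size and snap
 * context, builds an image request from the bio (or nodata for
 * discard/zeroout) and kicks the image request state machine;
 * completion ultimately happens in rbd_img_handle_request().
 */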
4814 static blk_status_t rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
4815 const struct blk_mq_queue_data *bd)
4817 struct request *rq = bd->rq;
4818 struct work_struct *work = blk_mq_rq_to_pdu(rq);
4820 queue_work(rbd_wq, work);
4824 static void rbd_free_disk(struct rbd_device *rbd_dev)
4826 blk_cleanup_queue(rbd_dev->disk->queue);
4827 blk_mq_free_tag_set(&rbd_dev->tag_set);
4828 put_disk(rbd_dev->disk);
4829 rbd_dev->disk = NULL;
4832 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
4833 struct ceph_object_id *oid,
4834 struct ceph_object_locator *oloc,
4835 void *buf, int buf_len)
4838 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4839 struct ceph_osd_request *req;
4840 struct page **pages;
4841 int num_pages = calc_pages_for(0, buf_len);
4844 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
4848 ceph_oid_copy(&req->r_base_oid, oid);
4849 ceph_oloc_copy(&req->r_base_oloc, oloc);
4850 req->r_flags = CEPH_OSD_FLAG_READ;
4852 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
4853 if (IS_ERR(pages)) {
4854 ret = PTR_ERR(pages);
4858 osd_req_op_extent_init(req, 0, CEPH_OSD_OP_READ, 0, buf_len, 0, 0);
4859 osd_req_op_extent_osd_data_pages(req, 0, pages, buf_len, 0, false,
4862 ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
4866 ceph_osdc_start_request(osdc, req, false);
4867 ret = ceph_osdc_wait_request(osdc, req);
4869 ceph_copy_from_page_vector(pages, buf, 0, ret);
4872 ceph_osdc_put_request(req);
4877 * Read the complete header for the given rbd device. On successful
4878 * return, the rbd_dev->header field will contain up-to-date
4879 * information about the image.
4881 static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
4883 struct rbd_image_header_ondisk *ondisk = NULL;
4890 * The complete header will include an array of its 64-bit
4891 * snapshot ids, followed by the names of those snapshots as
4892 * a contiguous block of NUL-terminated strings. Note that
4893 * the number of snapshots could change by the time we read
4894 * it in, in which case we re-read it.
4901 size = sizeof (*ondisk);
4902 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
4904 ondisk = kmalloc(size, GFP_KERNEL);
4908 ret = rbd_obj_read_sync(rbd_dev, &rbd_dev->header_oid,
4909 &rbd_dev->header_oloc, ondisk, size);
4912 if ((size_t)ret < size) {
4914 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
4918 if (!rbd_dev_ondisk_valid(ondisk)) {
4920 rbd_warn(rbd_dev, "invalid header");
4924 names_size = le64_to_cpu(ondisk->snap_names_len);
4925 want_count = snap_count;
4926 snap_count = le32_to_cpu(ondisk->snap_count);
4927 } while (snap_count != want_count);
4929 ret = rbd_header_from_disk(rbd_dev, ondisk);
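/*
 * Editorial sketch of the v1 on-disk header read by the loop above,
 * assuming N snapshots (see rbd_types.h for the exact definitions):
 *
 *	+---------------------------------------+
 *	| struct rbd_image_header_ondisk        |
 *	| N * struct rbd_image_snap_ondisk      |  <- snapshot ids, sizes
 *	| snap_names_len bytes of NUL-terminated |
 *	|   snapshot names                      |
 *	+---------------------------------------+
 *
 * snap_count is compared against want_count after every read because a
 * snapshot may be created or deleted while the read is in flight,
 * changing the size the buffer needs; if so, the header is re-read.
 */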
4936 static void rbd_dev_update_size(struct rbd_device *rbd_dev)
4941 * If EXISTS is not set, rbd_dev->disk may be NULL, so don't
4942 * try to update its size. If REMOVING is set, updating size
4943 * is just useless work since the device can't be opened.
4945 if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) &&
4946 !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
4947 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
4948 dout("setting size to %llu sectors", (unsigned long long)size);
4949 set_capacity(rbd_dev->disk, size);
4950 revalidate_disk(rbd_dev->disk);
4954 static int rbd_dev_refresh(struct rbd_device *rbd_dev)
4959 down_write(&rbd_dev->header_rwsem);
4960 mapping_size = rbd_dev->mapping.size;
4962 ret = rbd_dev_header_info(rbd_dev);
4967 * If there is a parent, see if it has disappeared due to the
4968 * mapped image getting flattened.
4970 if (rbd_dev->parent) {
4971 ret = rbd_dev_v2_parent_info(rbd_dev);
4976 rbd_assert(!rbd_is_snap(rbd_dev));
4977 rbd_dev->mapping.size = rbd_dev->header.image_size;
4980 up_write(&rbd_dev->header_rwsem);
4981 if (!ret && mapping_size != rbd_dev->mapping.size)
4982 rbd_dev_update_size(rbd_dev);
4987 static int rbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
4988 unsigned int hctx_idx, unsigned int numa_node)
4990 struct work_struct *work = blk_mq_rq_to_pdu(rq);
4992 INIT_WORK(work, rbd_queue_workfn);
4996 static const struct blk_mq_ops rbd_mq_ops = {
4997 .queue_rq = rbd_queue_rq,
4998 .init_request = rbd_init_request,
5001 static int rbd_init_disk(struct rbd_device *rbd_dev)
5003 struct gendisk *disk;
5004 struct request_queue *q;
5005 unsigned int objset_bytes =
5006 rbd_dev->layout.object_size * rbd_dev->layout.stripe_count;
5009 /* create gendisk info */
5010 disk = alloc_disk(single_major ?
5011 (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
5012 RBD_MINORS_PER_MAJOR);
5016 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
5018 disk->major = rbd_dev->major;
5019 disk->first_minor = rbd_dev->minor;
5021 disk->flags |= GENHD_FL_EXT_DEVT;
5022 disk->fops = &rbd_bd_ops;
5023 disk->private_data = rbd_dev;
5025 memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
5026 rbd_dev->tag_set.ops = &rbd_mq_ops;
5027 rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
5028 rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
5029 rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
5030 rbd_dev->tag_set.nr_hw_queues = 1;
5031 rbd_dev->tag_set.cmd_size = sizeof(struct work_struct);
5033 err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
5037 q = blk_mq_init_queue(&rbd_dev->tag_set);
5043 blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
5044 /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
5046 blk_queue_max_hw_sectors(q, objset_bytes >> SECTOR_SHIFT);
5047 q->limits.max_sectors = queue_max_hw_sectors(q);
5048 blk_queue_max_segments(q, USHRT_MAX);
5049 blk_queue_max_segment_size(q, UINT_MAX);
5050 blk_queue_io_min(q, rbd_dev->opts->alloc_size);
5051 blk_queue_io_opt(q, rbd_dev->opts->alloc_size);
5053 if (rbd_dev->opts->trim) {
5054 blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
5055 q->limits.discard_granularity = rbd_dev->opts->alloc_size;
5056 blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT);
5057 blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT);
5060 if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
5061 q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
5064 * disk_release() expects a queue ref from add_disk() and will
5065 * put it. Hold an extra ref until add_disk() is called.
5067 WARN_ON(!blk_get_queue(q));
5069 q->queuedata = rbd_dev;
5071 rbd_dev->disk = disk;
5075 blk_mq_free_tag_set(&rbd_dev->tag_set);
5085 static struct rbd_device *dev_to_rbd_dev(struct device *dev)
5087 return container_of(dev, struct rbd_device, dev);
5090 static ssize_t rbd_size_show(struct device *dev,
5091 struct device_attribute *attr, char *buf)
5093 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5095 return sprintf(buf, "%llu\n",
5096 (unsigned long long)rbd_dev->mapping.size);
5099 static ssize_t rbd_features_show(struct device *dev,
5100 struct device_attribute *attr, char *buf)
5102 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5104 return sprintf(buf, "0x%016llx\n", rbd_dev->header.features);
5107 static ssize_t rbd_major_show(struct device *dev,
5108 struct device_attribute *attr, char *buf)
5110 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5113 return sprintf(buf, "%d\n", rbd_dev->major);
5115 return sprintf(buf, "(none)\n");
5118 static ssize_t rbd_minor_show(struct device *dev,
5119 struct device_attribute *attr, char *buf)
5121 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5123 return sprintf(buf, "%d\n", rbd_dev->minor);
5126 static ssize_t rbd_client_addr_show(struct device *dev,
5127 struct device_attribute *attr, char *buf)
5129 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5130 struct ceph_entity_addr *client_addr =
5131 ceph_client_addr(rbd_dev->rbd_client->client);
5133 return sprintf(buf, "%pISpc/%u\n", &client_addr->in_addr,
5134 le32_to_cpu(client_addr->nonce));
5137 static ssize_t rbd_client_id_show(struct device *dev,
5138 struct device_attribute *attr, char *buf)
5140 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5142 return sprintf(buf, "client%lld\n",
5143 ceph_client_gid(rbd_dev->rbd_client->client));
5146 static ssize_t rbd_cluster_fsid_show(struct device *dev,
5147 struct device_attribute *attr, char *buf)
5149 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5151 return sprintf(buf, "%pU\n", &rbd_dev->rbd_client->client->fsid);
5154 static ssize_t rbd_config_info_show(struct device *dev,
5155 struct device_attribute *attr, char *buf)
5157 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5159 return sprintf(buf, "%s\n", rbd_dev->config_info);
5162 static ssize_t rbd_pool_show(struct device *dev,
5163 struct device_attribute *attr, char *buf)
5165 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5167 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
5170 static ssize_t rbd_pool_id_show(struct device *dev,
5171 struct device_attribute *attr, char *buf)
5173 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5175 return sprintf(buf, "%llu\n",
5176 (unsigned long long) rbd_dev->spec->pool_id);
5179 static ssize_t rbd_pool_ns_show(struct device *dev,
5180 struct device_attribute *attr, char *buf)
5182 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5184 return sprintf(buf, "%s\n", rbd_dev->spec->pool_ns ?: "");
5187 static ssize_t rbd_name_show(struct device *dev,
5188 struct device_attribute *attr, char *buf)
5190 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5192 if (rbd_dev->spec->image_name)
5193 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
5195 return sprintf(buf, "(unknown)\n");
5198 static ssize_t rbd_image_id_show(struct device *dev,
5199 struct device_attribute *attr, char *buf)
5201 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5203 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
5207 * Shows the name of the currently-mapped snapshot (or
5208 * RBD_SNAP_HEAD_NAME for the base image).
5210 static ssize_t rbd_snap_show(struct device *dev,
5211 struct device_attribute *attr,
5214 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5216 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
5219 static ssize_t rbd_snap_id_show(struct device *dev,
5220 struct device_attribute *attr, char *buf)
5222 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5224 return sprintf(buf, "%llu\n", rbd_dev->spec->snap_id);
5228 * For a v2 image, shows the chain of parent images, separated by empty
5229 * lines. For v1 images or if there is no parent, shows "(no parent image)".
5232 static ssize_t rbd_parent_show(struct device *dev,
5233 struct device_attribute *attr,
5236 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5239 if (!rbd_dev->parent)
5240 return sprintf(buf, "(no parent image)\n");
5242 for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
5243 struct rbd_spec *spec = rbd_dev->parent_spec;
5245 count += sprintf(&buf[count], "%s"
5246 "pool_id %llu\npool_name %s\n"
5248 "image_id %s\nimage_name %s\n"
5249 "snap_id %llu\nsnap_name %s\n"
5251 !count ? "" : "\n", /* first? */
5252 spec->pool_id, spec->pool_name,
5253 spec->pool_ns ?: "",
5254 spec->image_id, spec->image_name ?: "(unknown)",
5255 spec->snap_id, spec->snap_name,
5256 rbd_dev->parent_overlap);
5262 static ssize_t rbd_image_refresh(struct device *dev,
5263 struct device_attribute *attr,
5267 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5270 ret = rbd_dev_refresh(rbd_dev);
5277 static DEVICE_ATTR(size, 0444, rbd_size_show, NULL);
5278 static DEVICE_ATTR(features, 0444, rbd_features_show, NULL);
5279 static DEVICE_ATTR(major, 0444, rbd_major_show, NULL);
5280 static DEVICE_ATTR(minor, 0444, rbd_minor_show, NULL);
5281 static DEVICE_ATTR(client_addr, 0444, rbd_client_addr_show, NULL);
5282 static DEVICE_ATTR(client_id, 0444, rbd_client_id_show, NULL);
5283 static DEVICE_ATTR(cluster_fsid, 0444, rbd_cluster_fsid_show, NULL);
5284 static DEVICE_ATTR(config_info, 0400, rbd_config_info_show, NULL);
5285 static DEVICE_ATTR(pool, 0444, rbd_pool_show, NULL);
5286 static DEVICE_ATTR(pool_id, 0444, rbd_pool_id_show, NULL);
5287 static DEVICE_ATTR(pool_ns, 0444, rbd_pool_ns_show, NULL);
5288 static DEVICE_ATTR(name, 0444, rbd_name_show, NULL);
5289 static DEVICE_ATTR(image_id, 0444, rbd_image_id_show, NULL);
5290 static DEVICE_ATTR(refresh, 0200, NULL, rbd_image_refresh);
5291 static DEVICE_ATTR(current_snap, 0444, rbd_snap_show, NULL);
5292 static DEVICE_ATTR(snap_id, 0444, rbd_snap_id_show, NULL);
5293 static DEVICE_ATTR(parent, 0444, rbd_parent_show, NULL);
5295 static struct attribute *rbd_attrs[] = {
5296 &dev_attr_size.attr,
5297 &dev_attr_features.attr,
5298 &dev_attr_major.attr,
5299 &dev_attr_minor.attr,
5300 &dev_attr_client_addr.attr,
5301 &dev_attr_client_id.attr,
5302 &dev_attr_cluster_fsid.attr,
5303 &dev_attr_config_info.attr,
5304 &dev_attr_pool.attr,
5305 &dev_attr_pool_id.attr,
5306 &dev_attr_pool_ns.attr,
5307 &dev_attr_name.attr,
5308 &dev_attr_image_id.attr,
5309 &dev_attr_current_snap.attr,
5310 &dev_attr_snap_id.attr,
5311 &dev_attr_parent.attr,
5312 &dev_attr_refresh.attr,
5316 static struct attribute_group rbd_attr_group = {
5320 static const struct attribute_group *rbd_attr_groups[] = {
5325 static void rbd_dev_release(struct device *dev);
5327 static const struct device_type rbd_device_type = {
5329 .groups = rbd_attr_groups,
5330 .release = rbd_dev_release,
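/*
 * Editorial example (illustrative values): once a mapping is
 * registered, the attributes above appear under
 * /sys/bus/rbd/devices/<dev-id>/, e.g.:
 *
 *	$ cat /sys/bus/rbd/devices/0/size
 *	10737418240
 *	$ echo 1 > /sys/bus/rbd/devices/0/refresh
 *
 * See Documentation/ABI/testing/sysfs-bus-rbd for the authoritative
 * list of attributes.
 */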
5333 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
5335 kref_get(&spec->kref);
5340 static void rbd_spec_free(struct kref *kref);
5341 static void rbd_spec_put(struct rbd_spec *spec)
5344 kref_put(&spec->kref, rbd_spec_free);
5347 static struct rbd_spec *rbd_spec_alloc(void)
5349 struct rbd_spec *spec;
5351 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
5355 spec->pool_id = CEPH_NOPOOL;
5356 spec->snap_id = CEPH_NOSNAP;
5357 kref_init(&spec->kref);
5362 static void rbd_spec_free(struct kref *kref)
5364 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
5366 kfree(spec->pool_name);
5367 kfree(spec->pool_ns);
5368 kfree(spec->image_id);
5369 kfree(spec->image_name);
5370 kfree(spec->snap_name);
5374 static void rbd_dev_free(struct rbd_device *rbd_dev)
5376 WARN_ON(rbd_dev->watch_state != RBD_WATCH_STATE_UNREGISTERED);
5377 WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_UNLOCKED);
5379 ceph_oid_destroy(&rbd_dev->header_oid);
5380 ceph_oloc_destroy(&rbd_dev->header_oloc);
5381 kfree(rbd_dev->config_info);
5383 rbd_put_client(rbd_dev->rbd_client);
5384 rbd_spec_put(rbd_dev->spec);
5385 kfree(rbd_dev->opts);
5389 static void rbd_dev_release(struct device *dev)
5391 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5392 bool need_put = !!rbd_dev->opts;
5395 destroy_workqueue(rbd_dev->task_wq);
5396 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
5399 rbd_dev_free(rbd_dev);
5402 * This is racy, but way better than putting the module_put() outside of
5403 * the release callback. The race window is pretty small, so
5404 * doing something similar to dm (dm-builtin.c) is overkill.
5407 module_put(THIS_MODULE);
5410 static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc,
5411 struct rbd_spec *spec)
5413 struct rbd_device *rbd_dev;
5415 rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL);
5419 spin_lock_init(&rbd_dev->lock);
5420 INIT_LIST_HEAD(&rbd_dev->node);
5421 init_rwsem(&rbd_dev->header_rwsem);
5423 rbd_dev->header.data_pool_id = CEPH_NOPOOL;
5424 ceph_oid_init(&rbd_dev->header_oid);
5425 rbd_dev->header_oloc.pool = spec->pool_id;
5426 if (spec->pool_ns) {
5427 WARN_ON(!*spec->pool_ns);
5428 rbd_dev->header_oloc.pool_ns =
5429 ceph_find_or_create_string(spec->pool_ns,
5430 strlen(spec->pool_ns));
5433 mutex_init(&rbd_dev->watch_mutex);
5434 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
5435 INIT_DELAYED_WORK(&rbd_dev->watch_dwork, rbd_reregister_watch);
5437 init_rwsem(&rbd_dev->lock_rwsem);
5438 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
5439 INIT_WORK(&rbd_dev->acquired_lock_work, rbd_notify_acquired_lock);
5440 INIT_WORK(&rbd_dev->released_lock_work, rbd_notify_released_lock);
5441 INIT_DELAYED_WORK(&rbd_dev->lock_dwork, rbd_acquire_lock);
5442 INIT_WORK(&rbd_dev->unlock_work, rbd_release_lock_work);
5443 spin_lock_init(&rbd_dev->lock_lists_lock);
5444 INIT_LIST_HEAD(&rbd_dev->acquiring_list);
5445 INIT_LIST_HEAD(&rbd_dev->running_list);
5446 init_completion(&rbd_dev->acquire_wait);
5447 init_completion(&rbd_dev->releasing_wait);
5449 spin_lock_init(&rbd_dev->object_map_lock);
5451 rbd_dev->dev.bus = &rbd_bus_type;
5452 rbd_dev->dev.type = &rbd_device_type;
5453 rbd_dev->dev.parent = &rbd_root_dev;
5454 device_initialize(&rbd_dev->dev);
5456 rbd_dev->rbd_client = rbdc;
5457 rbd_dev->spec = spec;
5463 * Create a mapping rbd_dev.
5465 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
5466 struct rbd_spec *spec,
5467 struct rbd_options *opts)
5469 struct rbd_device *rbd_dev;
5471 rbd_dev = __rbd_dev_create(rbdc, spec);
5475 rbd_dev->opts = opts;
5477 /* get an id and fill in device name */
5478 rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0,
5479 minor_to_rbd_dev_id(1 << MINORBITS),
5481 if (rbd_dev->dev_id < 0)
5484 sprintf(rbd_dev->name, RBD_DRV_NAME "%d", rbd_dev->dev_id);
5485 rbd_dev->task_wq = alloc_ordered_workqueue("%s-tasks", WQ_MEM_RECLAIM,
5487 if (!rbd_dev->task_wq)
5490 /* we have a ref from do_rbd_add() */
5491 __module_get(THIS_MODULE);
5493 dout("%s rbd_dev %p dev_id %d\n", __func__, rbd_dev, rbd_dev->dev_id);
5497 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
5499 rbd_dev_free(rbd_dev);
5503 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
5506 put_device(&rbd_dev->dev);
5510 * Get the size and object order for an image snapshot, or if
5511 * snap_id is CEPH_NOSNAP, gets this information for the base
5514 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
5515 u8 *order, u64 *snap_size)
5517 __le64 snapid = cpu_to_le64(snap_id);
5522 } __attribute__ ((packed)) size_buf = { 0 };
5524 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5525 &rbd_dev->header_oloc, "get_size",
5526 &snapid, sizeof(snapid),
5527 &size_buf, sizeof(size_buf));
5528 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5531 if (ret < sizeof (size_buf))
5535 *order = size_buf.order;
5536 dout(" order %u", (unsigned int)*order);
5538 *snap_size = le64_to_cpu(size_buf.size);
5540 dout(" snap_id 0x%016llx snap_size = %llu\n",
5541 (unsigned long long)snap_id,
5542 (unsigned long long)*snap_size);
5547 static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
5549 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
5550 &rbd_dev->header.obj_order,
5551 &rbd_dev->header.image_size);
5554 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
5561 /* Response will be an encoded string, which includes a length */
5562 size = sizeof(__le32) + RBD_OBJ_PREFIX_LEN_MAX;
5563 reply_buf = kzalloc(size, GFP_KERNEL);
5567 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5568 &rbd_dev->header_oloc, "get_object_prefix",
5569 NULL, 0, reply_buf, size);
5570 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5575 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
5576 p + ret, NULL, GFP_NOIO);
5579 if (IS_ERR(rbd_dev->header.object_prefix)) {
5580 ret = PTR_ERR(rbd_dev->header.object_prefix);
5581 rbd_dev->header.object_prefix = NULL;
5583 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
5591 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
5592 bool read_only, u64 *snap_features)
5601 } __attribute__ ((packed)) features_buf = { 0 };
5605 features_in.snap_id = cpu_to_le64(snap_id);
5606 features_in.read_only = read_only;
5608 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5609 &rbd_dev->header_oloc, "get_features",
5610 &features_in, sizeof(features_in),
5611 &features_buf, sizeof(features_buf));
5612 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5615 if (ret < sizeof (features_buf))
5618 unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED;
5620 rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
5625 *snap_features = le64_to_cpu(features_buf.features);
5627 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
5628 (unsigned long long)snap_id,
5629 (unsigned long long)*snap_features,
5630 (unsigned long long)le64_to_cpu(features_buf.incompat));
5635 static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
5637 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
5639 &rbd_dev->header.features);
5643 * These are generic image flags, but since they are used only for
5644 * object map, store them in rbd_dev->object_map_flags.
5646 * For the same reason, this function is called only on object map
5647 * (re)load and not on header refresh.
5649 static int rbd_dev_v2_get_flags(struct rbd_device *rbd_dev)
5651 __le64 snapid = cpu_to_le64(rbd_dev->spec->snap_id);
5655 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5656 &rbd_dev->header_oloc, "get_flags",
5657 &snapid, sizeof(snapid),
5658 &flags, sizeof(flags));
5661 if (ret < sizeof(flags))
5664 rbd_dev->object_map_flags = le64_to_cpu(flags);
5668 struct parent_image_info {
5670 const char *pool_ns;
5671 const char *image_id;
5679 * The caller is responsible for @pii.
5681 static int decode_parent_image_spec(void **p, void *end,
5682 struct parent_image_info *pii)
5688 ret = ceph_start_decoding(p, end, 1, "ParentImageSpec",
5689 &struct_v, &struct_len);
5693 ceph_decode_64_safe(p, end, pii->pool_id, e_inval);
5694 pii->pool_ns = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
5695 if (IS_ERR(pii->pool_ns)) {
5696 ret = PTR_ERR(pii->pool_ns);
5697 pii->pool_ns = NULL;
5700 pii->image_id = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
5701 if (IS_ERR(pii->image_id)) {
5702 ret = PTR_ERR(pii->image_id);
5703 pii->image_id = NULL;
5706 ceph_decode_64_safe(p, end, pii->snap_id, e_inval);
5713 static int __get_parent_info(struct rbd_device *rbd_dev,
5714 struct page *req_page,
5715 struct page *reply_page,
5716 struct parent_image_info *pii)
5718 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
5719 size_t reply_len = PAGE_SIZE;
5723 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
5724 "rbd", "parent_get", CEPH_OSD_FLAG_READ,
5725 req_page, sizeof(u64), &reply_page, &reply_len);
5727 return ret == -EOPNOTSUPP ? 1 : ret;
5729 p = page_address(reply_page);
5730 end = p + reply_len;
5731 ret = decode_parent_image_spec(&p, end, pii);
5735 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
5736 "rbd", "parent_overlap_get", CEPH_OSD_FLAG_READ,
5737 req_page, sizeof(u64), &reply_page, &reply_len);
5741 p = page_address(reply_page);
5742 end = p + reply_len;
5743 ceph_decode_8_safe(&p, end, pii->has_overlap, e_inval);
5744 if (pii->has_overlap)
5745 ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
5754 * The caller is responsible for @pii.
5756 static int __get_parent_info_legacy(struct rbd_device *rbd_dev,
5757 struct page *req_page,
5758 struct page *reply_page,
5759 struct parent_image_info *pii)
5761 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
5762 size_t reply_len = PAGE_SIZE;
5766 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
5767 "rbd", "get_parent", CEPH_OSD_FLAG_READ,
5768 req_page, sizeof(u64), &reply_page, &reply_len);
5772 p = page_address(reply_page);
5773 end = p + reply_len;
5774 ceph_decode_64_safe(&p, end, pii->pool_id, e_inval);
5775 pii->image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
5776 if (IS_ERR(pii->image_id)) {
5777 ret = PTR_ERR(pii->image_id);
5778 pii->image_id = NULL;
5781 ceph_decode_64_safe(&p, end, pii->snap_id, e_inval);
5782 pii->has_overlap = true;
5783 ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
5791 static int get_parent_info(struct rbd_device *rbd_dev,
5792 struct parent_image_info *pii)
5794 struct page *req_page, *reply_page;
5798 req_page = alloc_page(GFP_KERNEL);
5802 reply_page = alloc_page(GFP_KERNEL);
5804 __free_page(req_page);
5808 p = page_address(req_page);
5809 ceph_encode_64(&p, rbd_dev->spec->snap_id);
5810 ret = __get_parent_info(rbd_dev, req_page, reply_page, pii);
5812 ret = __get_parent_info_legacy(rbd_dev, req_page, reply_page,
5815 __free_page(req_page);
5816 __free_page(reply_page);
5820 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
5822 struct rbd_spec *parent_spec;
5823 struct parent_image_info pii = { 0 };
5826 parent_spec = rbd_spec_alloc();
5830 ret = get_parent_info(rbd_dev, &pii);
5834 dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
5835 __func__, pii.pool_id, pii.pool_ns, pii.image_id, pii.snap_id,
5836 pii.has_overlap, pii.overlap);
5838 if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap) {
5840 * Either the parent never existed, or we have a
5841 * record of it but the image got flattened so it no
5842 * longer has a parent. When the parent of a
5843 * layered image disappears we immediately set the
5844 * overlap to 0. The effect of this is that all new
5845 * requests will be treated as if the image had no parent.
5848 * If !pii.has_overlap, the parent image spec is not
5849 * applicable. It's there to avoid duplication in each snapshot record.
5852 if (rbd_dev->parent_overlap) {
5853 rbd_dev->parent_overlap = 0;
5854 rbd_dev_parent_put(rbd_dev);
5855 pr_info("%s: clone image has been flattened\n",
5856 rbd_dev->disk->disk_name);
5859 goto out; /* No parent? No problem. */
5862 /* The ceph file layout needs to fit pool id in 32 bits */
5865 if (pii.pool_id > (u64)U32_MAX) {
5866 rbd_warn(NULL, "parent pool id too large (%llu > %u)",
5867 (unsigned long long)pii.pool_id, U32_MAX);
5872 * The parent won't change (except when the clone is
5873 * flattened, already handled that). So we only need to
5874 * record the parent spec if we have not already done so.
5876 if (!rbd_dev->parent_spec) {
5877 parent_spec->pool_id = pii.pool_id;
5878 if (pii.pool_ns && *pii.pool_ns) {
5879 parent_spec->pool_ns = pii.pool_ns;
5882 parent_spec->image_id = pii.image_id;
5883 pii.image_id = NULL;
5884 parent_spec->snap_id = pii.snap_id;
5886 rbd_dev->parent_spec = parent_spec;
5887 parent_spec = NULL; /* rbd_dev now owns this */
5891 * We always update the parent overlap. If it's zero we issue
5892 * a warning, as we will proceed as if there was no parent.
5896 /* refresh, careful to warn just once */
5897 if (rbd_dev->parent_overlap)
5899 "clone now standalone (overlap became 0)");
5902 rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
5905 rbd_dev->parent_overlap = pii.overlap;
5911 kfree(pii.image_id);
5912 rbd_spec_put(parent_spec);
5916 static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
5920 __le64 stripe_count;
5921 } __attribute__ ((packed)) striping_info_buf = { 0 };
5922 size_t size = sizeof (striping_info_buf);
5926 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5927 &rbd_dev->header_oloc, "get_stripe_unit_count",
5928 NULL, 0, &striping_info_buf, size);
5929 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5935 p = &striping_info_buf;
5936 rbd_dev->header.stripe_unit = ceph_decode_64(&p);
5937 rbd_dev->header.stripe_count = ceph_decode_64(&p);
5941 static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev)
5943 __le64 data_pool_id;
5946 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5947 &rbd_dev->header_oloc, "get_data_pool",
5948 NULL, 0, &data_pool_id, sizeof(data_pool_id));
5951 if (ret < sizeof(data_pool_id))
5954 rbd_dev->header.data_pool_id = le64_to_cpu(data_pool_id);
5955 WARN_ON(rbd_dev->header.data_pool_id == CEPH_NOPOOL);
5959 static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
5961 CEPH_DEFINE_OID_ONSTACK(oid);
5962 size_t image_id_size;
5967 void *reply_buf = NULL;
5969 char *image_name = NULL;
5972 rbd_assert(!rbd_dev->spec->image_name);
5974 len = strlen(rbd_dev->spec->image_id);
5975 image_id_size = sizeof (__le32) + len;
5976 image_id = kmalloc(image_id_size, GFP_KERNEL);
5981 end = image_id + image_id_size;
5982 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
5984 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
5985 reply_buf = kmalloc(size, GFP_KERNEL);
5989 ceph_oid_printf(&oid, "%s", RBD_DIRECTORY);
5990 ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
5991 "dir_get_name", image_id, image_id_size,
5996 end = reply_buf + ret;
5998 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
5999 if (IS_ERR(image_name))
6002 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
6010 static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
6012 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
6013 const char *snap_name;
6016 /* Skip over names until we find the one we are looking for */
6018 snap_name = rbd_dev->header.snap_names;
6019 while (which < snapc->num_snaps) {
6020 if (!strcmp(name, snap_name))
6021 return snapc->snaps[which];
6022 snap_name += strlen(snap_name) + 1;
6028 static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
6030 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
6035 for (which = 0; !found && which < snapc->num_snaps; which++) {
6036 const char *snap_name;
6038 snap_id = snapc->snaps[which];
6039 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
6040 if (IS_ERR(snap_name)) {
6041 /* ignore no-longer existing snapshots */
6042 if (PTR_ERR(snap_name) == -ENOENT)
6047 found = !strcmp(name, snap_name);
6050 return found ? snap_id : CEPH_NOSNAP;
6054 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
6055 * no snapshot by that name is found, or if an error occurs.
6057 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
6059 if (rbd_dev->image_format == 1)
6060 return rbd_v1_snap_id_by_name(rbd_dev, name);
6062 return rbd_v2_snap_id_by_name(rbd_dev, name);
6066 * An image being mapped will have everything but the snap id.
6068 static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
6070 struct rbd_spec *spec = rbd_dev->spec;
6072 rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
6073 rbd_assert(spec->image_id && spec->image_name);
6074 rbd_assert(spec->snap_name);
6076 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
6079 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
6080 if (snap_id == CEPH_NOSNAP)
6083 spec->snap_id = snap_id;
6085 spec->snap_id = CEPH_NOSNAP;
6092 * A parent image will have all ids but none of the names.
6094 * All names in an rbd spec are dynamically allocated. It's OK if we
6095 * can't figure out the name for an image id.
6097 static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
6099 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
6100 struct rbd_spec *spec = rbd_dev->spec;
6101 const char *pool_name;
6102 const char *image_name;
6103 const char *snap_name;
6106 rbd_assert(spec->pool_id != CEPH_NOPOOL);
6107 rbd_assert(spec->image_id);
6108 rbd_assert(spec->snap_id != CEPH_NOSNAP);
6110 /* Get the pool name; we have to make our own copy of this */
6112 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
6114 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
6117 pool_name = kstrdup(pool_name, GFP_KERNEL);
6121 /* Fetch the image name; tolerate failure here */
6123 image_name = rbd_dev_image_name(rbd_dev);
6125 rbd_warn(rbd_dev, "unable to get image name");
6127 /* Fetch the snapshot name */
6129 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
6130 if (IS_ERR(snap_name)) {
6131 ret = PTR_ERR(snap_name);
6135 spec->pool_name = pool_name;
6136 spec->image_name = image_name;
6137 spec->snap_name = snap_name;
6147 static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
6156 struct ceph_snap_context *snapc;
6160 * We'll need room for the seq value (maximum snapshot id),
6161 * snapshot count, and array of that many snapshot ids.
6162 * For now we have a fixed upper limit on the number we're
6163 * prepared to receive.
6165 size = sizeof (__le64) + sizeof (__le32) +
6166 RBD_MAX_SNAP_COUNT * sizeof (__le64);
6167 reply_buf = kzalloc(size, GFP_KERNEL);
6171 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
6172 &rbd_dev->header_oloc, "get_snapcontext",
6173 NULL, 0, reply_buf, size);
6174 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
6179 end = reply_buf + ret;
6181 ceph_decode_64_safe(&p, end, seq, out);
6182 ceph_decode_32_safe(&p, end, snap_count, out);
6185 * Make sure the reported number of snapshot ids wouldn't go
6186 * beyond the end of our buffer. But before checking that,
6187 * make sure the computed size of the snapshot context we
6188 * allocate is representable in a size_t.
6190 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
6195 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
6199 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
6205 for (i = 0; i < snap_count; i++)
6206 snapc->snaps[i] = ceph_decode_64(&p);
6208 ceph_put_snap_context(rbd_dev->header.snapc);
6209 rbd_dev->header.snapc = snapc;
6211 dout(" snap context seq = %llu, snap_count = %u\n",
6212 (unsigned long long)seq, (unsigned int)snap_count);
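/*
 * Editorial worked example for the sizing above: with
 * RBD_MAX_SNAP_COUNT == 510 the reply buffer is
 *
 *	sizeof(__le64) + sizeof(__le32) + 510 * sizeof(__le64)
 *	    = 8 + 4 + 4080 = 4092 bytes
 *
 * which is why that limit keeps the largest possible snapshot context
 * reply within a single 4 KiB allocation.
 */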
6219 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
6230 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
6231 reply_buf = kmalloc(size, GFP_KERNEL);
6233 return ERR_PTR(-ENOMEM);
6235 snapid = cpu_to_le64(snap_id);
6236 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
6237 &rbd_dev->header_oloc, "get_snapshot_name",
6238 &snapid, sizeof(snapid), reply_buf, size);
6239 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
6241 snap_name = ERR_PTR(ret);
6246 end = reply_buf + ret;
6247 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
6248 if (IS_ERR(snap_name))
6251 dout(" snap_id 0x%016llx snap_name = %s\n",
6252 (unsigned long long)snap_id, snap_name);
6259 static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
6261 bool first_time = rbd_dev->header.object_prefix == NULL;
6264 ret = rbd_dev_v2_image_size(rbd_dev);
6269 ret = rbd_dev_v2_header_onetime(rbd_dev);
6274 ret = rbd_dev_v2_snap_context(rbd_dev);
6275 if (ret && first_time) {
6276 kfree(rbd_dev->header.object_prefix);
6277 rbd_dev->header.object_prefix = NULL;
6283 static int rbd_dev_header_info(struct rbd_device *rbd_dev)
6285 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
6287 if (rbd_dev->image_format == 1)
6288 return rbd_dev_v1_header_info(rbd_dev);
6290 return rbd_dev_v2_header_info(rbd_dev);
6294 * Skips over white space at *buf, and updates *buf to point to the
6295 * first found non-space character (if any). Returns the length of
6296 * the token (string of non-white space characters) found. Note
6297 * that *buf must be terminated with '\0'.
6299 static inline size_t next_token(const char **buf)
6302 * These are the characters that produce nonzero for
6303 * isspace() in the "C" and "POSIX" locales.
6305 const char *spaces = " \f\n\r\t\v";
6307 *buf += strspn(*buf, spaces); /* Find start of token */
6309 return strcspn(*buf, spaces); /* Return token length */
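/*
 * Editorial example for next_token() above: given *buf pointing at
 * "  rbd0  force", it advances *buf to "rbd0  force" and returns 4,
 * the length of "rbd0".
 */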
6313 * Finds the next token in *buf, dynamically allocates a buffer big
6314 * enough to hold a copy of it, and copies the token into the new
6315 * buffer. The copy is guaranteed to be terminated with '\0'. Note
6316 * that a duplicate buffer is created even for a zero-length token.
6318 * Returns a pointer to the newly-allocated duplicate, or a null
6319 * pointer if memory for the duplicate was not available. If
6320 * the lenp argument is a non-null pointer, the length of the token
6321 * (not including the '\0') is returned in *lenp.
6323 * If successful, the *buf pointer will be updated to point beyond
6324 * the end of the found token.
6326 * Note: uses GFP_KERNEL for allocation.
6328 static inline char *dup_token(const char **buf, size_t *lenp)
6333 len = next_token(buf);
6334 dup = kmemdup(*buf, len + 1, GFP_KERNEL);
6337 *(dup + len) = '\0';
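/*
 * Editorial example for dup_token() above: with *buf == " mypool myimage",
 * it returns a kmalloc'd copy of "mypool" (which the caller must free),
 * stores 6 in *lenp when lenp is non-NULL, and leaves *buf pointing at
 * " myimage".
 */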
6346 static int rbd_parse_param(struct fs_parameter *param,
6347 struct rbd_parse_opts_ctx *pctx)
6349 struct rbd_options *opt = pctx->opts;
6350 struct fs_parse_result result;
6351 struct p_log log = {.prefix = "rbd"};
6354 ret = ceph_parse_param(param, pctx->copts, NULL);
6355 if (ret != -ENOPARAM)
6358 token = __fs_parse(&log, rbd_parameters, param, &result);
6359 dout("%s fs_parse '%s' token %d\n", __func__, param->key, token);
6361 if (token == -ENOPARAM)
6362 return inval_plog(&log, "Unknown parameter '%s'",
6368 case Opt_queue_depth:
6369 if (result.uint_32 < 1)
6371 opt->queue_depth = result.uint_32;
6373 case Opt_alloc_size:
6374 if (result.uint_32 < SECTOR_SIZE)
6376 if (!is_power_of_2(result.uint_32))
6377 return inval_plog(&log, "alloc_size must be a power of 2");
6378 opt->alloc_size = result.uint_32;
6380 case Opt_lock_timeout:
6381 /* 0 is "wait forever" (i.e. infinite timeout) */
6382 if (result.uint_32 > INT_MAX / 1000)
6384 opt->lock_timeout = msecs_to_jiffies(result.uint_32 * 1000);
6387 kfree(pctx->spec->pool_ns);
6388 pctx->spec->pool_ns = param->string;
6389 param->string = NULL;
6392 opt->read_only = true;
6394 case Opt_read_write:
6395 opt->read_only = false;
6397 case Opt_lock_on_read:
6398 opt->lock_on_read = true;
6401 opt->exclusive = true;
6413 return inval_plog(&log, "%s out of range", param->key);
6417 * This duplicates most of generic_parse_monolithic(), untying it from
6418 * fs_context and skipping standard superblock and security options.
6420 static int rbd_parse_options(char *options, struct rbd_parse_opts_ctx *pctx)
6425 dout("%s '%s'\n", __func__, options);
6426 while ((key = strsep(&options, ",")) != NULL) {
6428 struct fs_parameter param = {
6430 .type = fs_value_is_flag,
6432 char *value = strchr(key, '=');
6439 v_len = strlen(value);
6440 param.string = kmemdup_nul(value, v_len,
6444 param.type = fs_value_is_string;
6448 ret = rbd_parse_param(¶m, pctx);
6449 kfree(param.string);
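/*
 * Editorial example (illustrative): an <options> token such as
 *
 *	"queue_depth=128,alloc_size=65536,lock_on_read,read_only"
 *
 * is split on ',' by the loop above; each key[=value] is handed first
 * to ceph_parse_param() and, only if that returns -ENOPARAM, to the
 * rbd parameter table via __fs_parse() in rbd_parse_param().
 */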
6459 * Parse the options provided for an "rbd add" (i.e., rbd image
6460 * mapping) request. These arrive via a write to /sys/bus/rbd/add,
6461 * and the data written is passed here via a NUL-terminated buffer.
6462 * Returns 0 if successful or an error code otherwise.
6464 * The information extracted from these options is recorded in
6465 * the other parameters which return dynamically-allocated
6466 * structures:
6467 *  ceph_opts
6468 *      The address of a pointer that will refer to a ceph options
6469 *      structure. Caller must release the returned pointer using
6470 *      ceph_destroy_options() when it is no longer needed.
6471 *  rbd_opts
6472 *      Address of an rbd options pointer. Fully initialized by
6473 *      this function; caller must release with kfree().
6474 *  rbd_spec
6475 *      Address of an rbd image specification pointer. Fully
6476 *      initialized by this function based on parsed options.
6477 *      Caller must release with rbd_spec_put().
6479 * The options passed take this form:
6480 *  <mon_addrs> <options> <pool_name> <image_name> [<snap_id>]
6481 * where:
6482 *  <mon_addrs>
6483 *      A comma-separated list of one or more monitor addresses.
6484 *      A monitor address is an ip address, optionally followed
6485 *      by a port number (separated by a colon).
6486 *      I.e.: ip1[:port1][,ip2[:port2]...]
6487 *  <options>
6488 *      A comma-separated list of ceph and/or rbd options.
6489 *  <pool_name>
6490 *      The name of the rados pool containing the rbd image.
6491 *  <image_name>
6492 *      The name of the image in that pool to map.
6493 *  <snap_id>
6494 *      An optional snapshot id. If provided, the mapping will
6495 *      present data from the image at the time that snapshot was
6496 *      created. The image head is used if no snapshot id is
6497 *      provided. Snapshot mappings are always read-only.
6499 static int rbd_add_parse_args(const char *buf,
6500 struct ceph_options **ceph_opts,
6501 struct rbd_options **opts,
6502 struct rbd_spec **rbd_spec)
6506 const char *mon_addrs;
6508 size_t mon_addrs_size;
6509 struct rbd_parse_opts_ctx pctx = { 0 };
6512 /* The first four tokens are required */
6514 len = next_token(&buf);
6516 rbd_warn(NULL, "no monitor address(es) provided");
6520 mon_addrs_size = len;
6524 options = dup_token(&buf, NULL);
6528 rbd_warn(NULL, "no options provided");
6532 pctx.spec = rbd_spec_alloc();
6536 pctx.spec->pool_name = dup_token(&buf, NULL);
6537 if (!pctx.spec->pool_name)
6539 if (!*pctx.spec->pool_name) {
6540 rbd_warn(NULL, "no pool name provided");
6544 pctx.spec->image_name = dup_token(&buf, NULL);
6545 if (!pctx.spec->image_name)
6547 if (!*pctx.spec->image_name) {
6548 rbd_warn(NULL, "no image name provided");
6553 * Snapshot name is optional; default is to use "-"
6554 * (indicating the head/no snapshot).
6556 len = next_token(&buf);
6558 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
6559 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
6560 } else if (len > RBD_MAX_SNAP_NAME_LEN) {
6561 ret = -ENAMETOOLONG;
6564 snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
6567 *(snap_name + len) = '\0';
6568 pctx.spec->snap_name = snap_name;
6570 pctx.copts = ceph_alloc_options();
6574 /* Initialize all rbd options to the defaults */
6576 pctx.opts = kzalloc(sizeof(*pctx.opts), GFP_KERNEL);
6580 pctx.opts->read_only = RBD_READ_ONLY_DEFAULT;
6581 pctx.opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
6582 pctx.opts->alloc_size = RBD_ALLOC_SIZE_DEFAULT;
6583 pctx.opts->lock_timeout = RBD_LOCK_TIMEOUT_DEFAULT;
6584 pctx.opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT;
6585 pctx.opts->exclusive = RBD_EXCLUSIVE_DEFAULT;
6586 pctx.opts->trim = RBD_TRIM_DEFAULT;
6588 ret = ceph_parse_mon_ips(mon_addrs, mon_addrs_size, pctx.copts, NULL);
6592 ret = rbd_parse_options(options, &pctx);
6596 *ceph_opts = pctx.copts;
6598 *rbd_spec = pctx.spec;
6606 ceph_destroy_options(pctx.copts);
6607 rbd_spec_put(pctx.spec);
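/*
 * Editorial example (illustrative values): a buffer written to
 * /sys/bus/rbd/add such as
 *
 *	"1.2.3.4:6789 name=admin,secret=<key> mypool myimage -"
 *
 * parses as one monitor address, two ceph options, pool "mypool",
 * image "myimage" and the head revision ("-"), i.e. a read-write
 * mapping of the current image.
 */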
6612 static void rbd_dev_image_unlock(struct rbd_device *rbd_dev)
6614 down_write(&rbd_dev->lock_rwsem);
6615 if (__rbd_is_lock_owner(rbd_dev))
6616 __rbd_release_lock(rbd_dev);
6617 up_write(&rbd_dev->lock_rwsem);
6621 * If the wait is interrupted, an error is returned even if the lock
6622 * was successfully acquired. rbd_dev_image_unlock() will release it if needed.
6625 static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
6629 if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK)) {
6630 if (!rbd_dev->opts->exclusive && !rbd_dev->opts->lock_on_read)
6633 rbd_warn(rbd_dev, "exclusive-lock feature is not enabled");
6637 if (rbd_is_ro(rbd_dev))
6640 rbd_assert(!rbd_is_lock_owner(rbd_dev));
6641 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
6642 ret = wait_for_completion_killable_timeout(&rbd_dev->acquire_wait,
6643 ceph_timeout_jiffies(rbd_dev->opts->lock_timeout));
6645 ret = rbd_dev->acquire_err;
6647 cancel_delayed_work_sync(&rbd_dev->lock_dwork);
6653 rbd_warn(rbd_dev, "failed to acquire exclusive lock: %ld", ret);
6658 * The lock may have been released by now, unless automatic lock
6659 * transitions are disabled.
6661 rbd_assert(!rbd_dev->opts->exclusive || rbd_is_lock_owner(rbd_dev));
6666 * An rbd format 2 image has a unique identifier, distinct from the
6667 * name given to it by the user. Internally, that identifier is
6668 * what's used to specify the names of objects related to the image.
6670 * A special "rbd id" object is used to map an rbd image name to its
6671 * id. If that object doesn't exist, then there is no v2 rbd image
6672 * with the supplied name.
6674 * This function will record the given rbd_dev's image_id field if
6675 * it can be determined, and in that case will return 0. If any
6676 * errors occur a negative errno will be returned and the rbd_dev's
6677 * image_id field will be unchanged (and should be NULL).
6679 static int rbd_dev_image_id(struct rbd_device *rbd_dev)
6683 CEPH_DEFINE_OID_ONSTACK(oid);
6688 * When probing a parent image, the image id is already
6689 * known (and the image name likely is not). There's no
6690 * need to fetch the image id again in this case. We
6691 * do still need to set the image format though.
6693 if (rbd_dev->spec->image_id) {
6694 rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
6700 * First, see if the format 2 image id file exists, and if
6701 * so, get the image's persistent id from it.
6703 ret = ceph_oid_aprintf(&oid, GFP_KERNEL, "%s%s", RBD_ID_PREFIX,
6704 rbd_dev->spec->image_name);
6708 dout("rbd id object name is %s\n", oid.name);
6710 /* Response will be an encoded string, which includes a length */
6711 size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
6712 response = kzalloc(size, GFP_NOIO);
6718 /* If it doesn't exist we'll assume it's a format 1 image */
6720 ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
6723 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
6724 if (ret == -ENOENT) {
6725 image_id = kstrdup("", GFP_KERNEL);
6726 ret = image_id ? 0 : -ENOMEM;
6728 rbd_dev->image_format = 1;
6729 } else if (ret >= 0) {
6732 image_id = ceph_extract_encoded_string(&p, p + ret,
6734 ret = PTR_ERR_OR_ZERO(image_id);
6736 rbd_dev->image_format = 2;
6740 rbd_dev->spec->image_id = image_id;
6741 dout("image_id is %s\n", image_id);
6745 ceph_oid_destroy(&oid);
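/*
 * Editorial example: assuming RBD_ID_PREFIX is "rbd_id." (defined in
 * rbd_types.h), a format 2 image named "myimage" stores its id in the
 * "rbd_id.myimage" object; if that object does not exist the image is
 * treated as format 1 and gets an empty image id.
 */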
6750 * Undo whatever state changes are made by v1 or v2 header info call routines.
6753 static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
6755 struct rbd_image_header *header;
6757 rbd_dev_parent_put(rbd_dev);
6758 rbd_object_map_free(rbd_dev);
6759 rbd_dev_mapping_clear(rbd_dev);
6761 /* Free dynamic fields from the header, then zero it out */
6763 header = &rbd_dev->header;
6764 ceph_put_snap_context(header->snapc);
6765 kfree(header->snap_sizes);
6766 kfree(header->snap_names);
6767 kfree(header->object_prefix);
6768 memset(header, 0, sizeof (*header));
6771 static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
6775 ret = rbd_dev_v2_object_prefix(rbd_dev);
6780 * Get and check the features for the image. Currently the
6781 * features are assumed to never change.
6783 ret = rbd_dev_v2_features(rbd_dev);
6787 /* If the image supports fancy striping, get its parameters */
6789 if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
6790 ret = rbd_dev_v2_striping_info(rbd_dev);
6795 if (rbd_dev->header.features & RBD_FEATURE_DATA_POOL) {
6796 ret = rbd_dev_v2_data_pool(rbd_dev);
6801 rbd_init_layout(rbd_dev);
6805 rbd_dev->header.features = 0;
6806 kfree(rbd_dev->header.object_prefix);
6807 rbd_dev->header.object_prefix = NULL;
6812 * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() ->
6813 * rbd_dev_image_probe() recursion depth, which means it's also the
6814 * length of the already discovered part of the parent chain.
6816 static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
6818 struct rbd_device *parent = NULL;
6821 if (!rbd_dev->parent_spec)
6824 if (++depth > RBD_MAX_PARENT_CHAIN_LEN) {
6825 pr_info("parent chain is too long (%d)\n", depth);
6830 parent = __rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec);
6837 * Images related by parent/child relationships always share
6838 * rbd_client and spec/parent_spec, so bump their refcounts.
6840 __rbd_get_client(rbd_dev->rbd_client);
6841 rbd_spec_get(rbd_dev->parent_spec);
6843 __set_bit(RBD_DEV_FLAG_READONLY, &parent->flags);
6845 ret = rbd_dev_image_probe(parent, depth);
6849 rbd_dev->parent = parent;
6850 atomic_set(&rbd_dev->parent_ref, 1);
6854 rbd_dev_unparent(rbd_dev);
6855 rbd_dev_destroy(parent);
6859 static void rbd_dev_device_release(struct rbd_device *rbd_dev)
6861 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
6862 rbd_free_disk(rbd_dev);
6864 unregister_blkdev(rbd_dev->major, rbd_dev->name);
6868 * rbd_dev->header_rwsem must be locked for write and will be unlocked
6871 static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
6875 /* Record our major and minor device numbers. */
6877 if (!single_major) {
6878 ret = register_blkdev(0, rbd_dev->name);
6880 goto err_out_unlock;
6882 rbd_dev->major = ret;
6885 rbd_dev->major = rbd_major;
6886 rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
6889 /* Set up the blkdev mapping. */
6891 ret = rbd_init_disk(rbd_dev);
6893 goto err_out_blkdev;
6895 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
6896 set_disk_ro(rbd_dev->disk, rbd_is_ro(rbd_dev));
6898 ret = dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id);
6902 set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
6903 up_write(&rbd_dev->header_rwsem);
6907 rbd_free_disk(rbd_dev);
6910 unregister_blkdev(rbd_dev->major, rbd_dev->name);
6912 up_write(&rbd_dev->header_rwsem);
6916 static int rbd_dev_header_name(struct rbd_device *rbd_dev)
6918 struct rbd_spec *spec = rbd_dev->spec;
6921 /* Record the header object name for this rbd image. */
6923 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
6924 if (rbd_dev->image_format == 1)
6925 ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
6926 spec->image_name, RBD_SUFFIX);
6928 ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
6929 RBD_HEADER_PREFIX, spec->image_id);
6934 static void rbd_print_dne(struct rbd_device *rbd_dev, bool is_snap)
6937 pr_info("image %s/%s%s%s does not exist\n",
6938 rbd_dev->spec->pool_name,
6939 rbd_dev->spec->pool_ns ?: "",
6940 rbd_dev->spec->pool_ns ? "/" : "",
6941 rbd_dev->spec->image_name);
6943 pr_info("snap %s/%s%s%s@%s does not exist\n",
6944 rbd_dev->spec->pool_name,
6945 rbd_dev->spec->pool_ns ?: "",
6946 rbd_dev->spec->pool_ns ? "/" : "",
6947 rbd_dev->spec->image_name,
6948 rbd_dev->spec->snap_name);
6952 static void rbd_dev_image_release(struct rbd_device *rbd_dev)
6954 rbd_dev_unprobe(rbd_dev);
6956 rbd_unregister_watch(rbd_dev);
6957 rbd_dev->image_format = 0;
6958 kfree(rbd_dev->spec->image_id);
6959 rbd_dev->spec->image_id = NULL;
6963 * Probe for the existence of the header object for the given rbd
6964 * device. If this image is the one being mapped (i.e., not a
6965 * parent), initiate a watch on its header object before using that
6966 * object to get detailed information about the rbd image.
6968 static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
6970 bool need_watch = !rbd_is_ro(rbd_dev);
6974 * Get the id from the image id object. Unless there's an
6975 * error, rbd_dev->spec->image_id will be filled in with
6976 * a dynamically-allocated string, and rbd_dev->image_format
6977 * will be set to either 1 or 2.
6979 ret = rbd_dev_image_id(rbd_dev);
6983 ret = rbd_dev_header_name(rbd_dev);
6985 goto err_out_format;
6988 ret = rbd_register_watch(rbd_dev);
6991 rbd_print_dne(rbd_dev, false);
6992 goto err_out_format;
6996 ret = rbd_dev_header_info(rbd_dev);
6998 if (ret == -ENOENT && !need_watch)
6999 rbd_print_dne(rbd_dev, false);
7004 * If this image is the one being mapped, we have pool name and
7005 * id, image name and id, and snap name - need to fill snap id.
7006 * Otherwise this is a parent image, identified by pool, image
7007 * and snap ids - need to fill in names for those ids.
7010 ret = rbd_spec_fill_snap_id(rbd_dev);
7012 ret = rbd_spec_fill_names(rbd_dev);
7015 rbd_print_dne(rbd_dev, true);
7019 ret = rbd_dev_mapping_set(rbd_dev);
7023 if (rbd_is_snap(rbd_dev) &&
7024 (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP)) {
7025 ret = rbd_object_map_load(rbd_dev);
7030 if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
7031 ret = rbd_dev_v2_parent_info(rbd_dev);
7036 ret = rbd_dev_probe_parent(rbd_dev, depth);
7040 dout("discovered format %u image, header name is %s\n",
7041 rbd_dev->image_format, rbd_dev->header_oid.name);
7045 rbd_dev_unprobe(rbd_dev);
7048 rbd_unregister_watch(rbd_dev);
7050 rbd_dev->image_format = 0;
7051 kfree(rbd_dev->spec->image_id);
7052 rbd_dev->spec->image_id = NULL;
7056 static ssize_t do_rbd_add(struct bus_type *bus,
7060 struct rbd_device *rbd_dev = NULL;
7061 struct ceph_options *ceph_opts = NULL;
7062 struct rbd_options *rbd_opts = NULL;
7063 struct rbd_spec *spec = NULL;
7064 struct rbd_client *rbdc;
7067 if (!try_module_get(THIS_MODULE))
7070 /* parse add command */
7071 rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
7075 rbdc = rbd_get_client(ceph_opts);
7082 rc = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, spec->pool_name);
7085 pr_info("pool %s does not exist\n", spec->pool_name);
7086 goto err_out_client;
7088 spec->pool_id = (u64)rc;
7090 rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
7093 goto err_out_client;
7095 rbdc = NULL; /* rbd_dev now owns this */
7096 spec = NULL; /* rbd_dev now owns this */
7097 rbd_opts = NULL; /* rbd_dev now owns this */
7099 /* if we are mapping a snapshot it will be a read-only mapping */
7100 if (rbd_dev->opts->read_only ||
7101 strcmp(rbd_dev->spec->snap_name, RBD_SNAP_HEAD_NAME))
7102 __set_bit(RBD_DEV_FLAG_READONLY, &rbd_dev->flags);
7104 rbd_dev->config_info = kstrdup(buf, GFP_KERNEL);
7105 if (!rbd_dev->config_info) {
7107 goto err_out_rbd_dev;
7110 down_write(&rbd_dev->header_rwsem);
7111 rc = rbd_dev_image_probe(rbd_dev, 0);
7113 up_write(&rbd_dev->header_rwsem);
7114 goto err_out_rbd_dev;
7117 if (rbd_dev->opts->alloc_size > rbd_dev->layout.object_size) {
7118 rbd_warn(rbd_dev, "alloc_size adjusted to %u",
7119 rbd_dev->layout.object_size);
7120 rbd_dev->opts->alloc_size = rbd_dev->layout.object_size;
7123 rc = rbd_dev_device_setup(rbd_dev);
7125 goto err_out_image_probe;
7127 rc = rbd_add_acquire_lock(rbd_dev);
7129 goto err_out_image_lock;
7131 /* Everything's ready. Announce the disk to the world. */
7133 rc = device_add(&rbd_dev->dev);
7135 goto err_out_image_lock;
7137 device_add_disk(&rbd_dev->dev, rbd_dev->disk, NULL);
7138 /* see rbd_init_disk() */
7139 blk_put_queue(rbd_dev->disk->queue);
7141 spin_lock(&rbd_dev_list_lock);
7142 list_add_tail(&rbd_dev->node, &rbd_dev_list);
7143 spin_unlock(&rbd_dev_list_lock);
7145 pr_info("%s: capacity %llu features 0x%llx\n", rbd_dev->disk->disk_name,
7146 (unsigned long long)get_capacity(rbd_dev->disk) << SECTOR_SHIFT,
7147 rbd_dev->header.features);
7150 module_put(THIS_MODULE);
7154 rbd_dev_image_unlock(rbd_dev);
7155 rbd_dev_device_release(rbd_dev);
7156 err_out_image_probe:
7157 rbd_dev_image_release(rbd_dev);
7159 rbd_dev_destroy(rbd_dev);
7161 rbd_put_client(rbdc);
7168 static ssize_t add_store(struct bus_type *bus, const char *buf, size_t count)
7173 return do_rbd_add(bus, buf, count);
7176 static ssize_t add_single_major_store(struct bus_type *bus, const char *buf,
7179 return do_rbd_add(bus, buf, count);
7182 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
7184 while (rbd_dev->parent) {
7185 struct rbd_device *first = rbd_dev;
7186 struct rbd_device *second = first->parent;
7187 struct rbd_device *third;
7190 * Follow to the parent with no grandparent and remove it.
7193 while (second && (third = second->parent)) {
7198 rbd_dev_image_release(second);
7199 rbd_dev_destroy(second);
7200 first->parent = NULL;
7201 first->parent_overlap = 0;
7203 rbd_assert(first->parent_spec);
7204 rbd_spec_put(first->parent_spec);
7205 first->parent_spec = NULL;
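/*
 * Editorial example: for a chain rbd_dev -> P1 -> P2 -> P3, the loop
 * above releases P3 first (the parent with no grandparent), then P2,
 * then P1, unwinding the chain from the most distant ancestor inward
 * so no image is destroyed while a descendant still references it.
 */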
7209 static ssize_t do_rbd_remove(struct bus_type *bus,
7213 struct rbd_device *rbd_dev = NULL;
7214 struct list_head *tmp;
7222 sscanf(buf, "%d %5s", &dev_id, opt_buf);
7224 pr_err("dev_id out of range\n");
7227 if (opt_buf[0] != '\0') {
7228 if (!strcmp(opt_buf, "force")) {
7231 pr_err("bad remove option at '%s'\n", opt_buf);
7237 spin_lock(&rbd_dev_list_lock);
7238 list_for_each(tmp, &rbd_dev_list) {
7239 rbd_dev = list_entry(tmp, struct rbd_device, node);
7240 if (rbd_dev->dev_id == dev_id) {
7246 spin_lock_irq(&rbd_dev->lock);
7247 if (rbd_dev->open_count && !force)
7249 else if (test_and_set_bit(RBD_DEV_FLAG_REMOVING,
7252 spin_unlock_irq(&rbd_dev->lock);
7254 spin_unlock(&rbd_dev_list_lock);
7260 * Prevent new IO from being queued and wait for existing
7261 * IO to complete/fail.
7263 blk_mq_freeze_queue(rbd_dev->disk->queue);
7264 blk_set_queue_dying(rbd_dev->disk->queue);
7267 del_gendisk(rbd_dev->disk);
7268 spin_lock(&rbd_dev_list_lock);
7269 list_del_init(&rbd_dev->node);
7270 spin_unlock(&rbd_dev_list_lock);
7271 device_del(&rbd_dev->dev);
7273 rbd_dev_image_unlock(rbd_dev);
7274 rbd_dev_device_release(rbd_dev);
7275 rbd_dev_image_release(rbd_dev);
7276 rbd_dev_destroy(rbd_dev);
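/*
 * Editorial example (illustrative): removal mirrors do_rbd_add() via
 * the remove attribute, e.g.:
 *
 *	$ echo 0 > /sys/bus/rbd/remove		unmap device rbd0
 *	$ echo "0 force" > /sys/bus/rbd/remove	unmap even while open
 */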
7280 static ssize_t remove_store(struct bus_type *bus, const char *buf, size_t count)
7285 return do_rbd_remove(bus, buf, count);
7288 static ssize_t remove_single_major_store(struct bus_type *bus, const char *buf,
7291 return do_rbd_remove(bus, buf, count);
7295 * create control files in sysfs
7298 static int __init rbd_sysfs_init(void)
7302 ret = device_register(&rbd_root_dev);
7306 ret = bus_register(&rbd_bus_type);
7308 device_unregister(&rbd_root_dev);
7313 static void __exit rbd_sysfs_cleanup(void)
7315 bus_unregister(&rbd_bus_type);
7316 device_unregister(&rbd_root_dev);
7319 static int __init rbd_slab_init(void)
7321 rbd_assert(!rbd_img_request_cache);
7322 rbd_img_request_cache = KMEM_CACHE(rbd_img_request, 0);
7323 if (!rbd_img_request_cache)
7326 rbd_assert(!rbd_obj_request_cache);
7327 rbd_obj_request_cache = KMEM_CACHE(rbd_obj_request, 0);
7328 if (!rbd_obj_request_cache)
7334 kmem_cache_destroy(rbd_img_request_cache);
7335 rbd_img_request_cache = NULL;
7339 static void rbd_slab_exit(void)
7341 rbd_assert(rbd_obj_request_cache);
7342 kmem_cache_destroy(rbd_obj_request_cache);
7343 rbd_obj_request_cache = NULL;
7345 rbd_assert(rbd_img_request_cache);
7346 kmem_cache_destroy(rbd_img_request_cache);
7347 rbd_img_request_cache = NULL;
7350 static int __init rbd_init(void)
7354 if (!libceph_compatible(NULL)) {
7355 rbd_warn(NULL, "libceph incompatibility (quitting)");
7359 rc = rbd_slab_init();
7364 * The number of active work items is limited by the number of
7365 * rbd devices * queue depth, so leave @max_active at default.
7367 rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
7374 rbd_major = register_blkdev(0, RBD_DRV_NAME);
7375 if (rbd_major < 0) {
7381 rc = rbd_sysfs_init();
7383 goto err_out_blkdev;
7386 pr_info("loaded (major %d)\n", rbd_major);
7388 pr_info("loaded\n");
7394 unregister_blkdev(rbd_major, RBD_DRV_NAME);
7396 destroy_workqueue(rbd_wq);
7402 static void __exit rbd_exit(void)
7404 ida_destroy(&rbd_dev_id_ida);
7405 rbd_sysfs_cleanup();
7407 unregister_blkdev(rbd_major, RBD_DRV_NAME);
7408 destroy_workqueue(rbd_wq);
7412 module_init(rbd_init);
7413 module_exit(rbd_exit);
7415 MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
7416 MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
7417 MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
7418 /* following authorship retained from original osdblk.c */
7419 MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");
7421 MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
7422 MODULE_LICENSE("GPL");