/*
   rbd.c -- Export ceph rados objects as a Linux block device


   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.



   For usage instructions, please refer to:

                 Documentation/ABI/testing/sysfs-bus-rbd

 */
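/*
 * Illustrative sketch of that sysfs interface (the ABI document above is
 * authoritative; the monitor address, credentials, pool and image names
 * below are made up):
 *
 *   # echo "1.2.3.4:6789 name=admin,secret=<key> rbd myimage -" \
 *         > /sys/bus/rbd/add
 *   # echo 0 > /sys/bus/rbd/remove
 */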
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/cls_lock_client.h>
#include <linux/ceph/striper.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/workqueue.h>

#include "rbd_types.h"

#define RBD_DEBUG	/* Activate rbd_assert() calls */

/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value returns
 * -EINVAL without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
	unsigned int counter;

	counter = (unsigned int)atomic_fetch_add_unless(v, 1, 0);
	if (counter <= (unsigned int)INT_MAX)
		return (int)counter;

	atomic_dec(v);

	return -EINVAL;
}

/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
	int counter;

	counter = atomic_dec_return(v);
	if (counter >= 0)
		return counter;

	atomic_inc(v);

	return -EINVAL;
}

#define RBD_DRV_NAME "rbd"

#define RBD_MINORS_PER_MAJOR		256
#define RBD_SINGLE_MAJOR_PART_SHIFT	4

#define RBD_MAX_PARENT_CHAIN_LEN	16

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN \
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */
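/*
 * Worked out: with NAME_MAX 255 and the 5-character "snap_" prefix,
 * RBD_MAX_SNAP_NAME_LEN is 250.  Likewise, 510 snapshot ids at 8 bytes
 * each occupy 4080 bytes, so a full snapshot context (header included)
 * still fits in a single 4 KiB allocation.
 */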
#define RBD_SNAP_HEAD_NAME	"-"

#define	BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64

#define RBD_NOTIFY_TIMEOUT	5	/* seconds */
#define RBD_RETRY_DELAY		msecs_to_jiffies(1000)

#define RBD_FEATURE_LAYERING		(1ULL<<0)
#define RBD_FEATURE_STRIPINGV2		(1ULL<<1)
#define RBD_FEATURE_EXCLUSIVE_LOCK	(1ULL<<2)
#define RBD_FEATURE_DATA_POOL		(1ULL<<7)
#define RBD_FEATURE_OPERATIONS		(1ULL<<8)

#define RBD_FEATURES_ALL	(RBD_FEATURE_LAYERING |		\
				 RBD_FEATURE_STRIPINGV2 |	\
				 RBD_FEATURE_EXCLUSIVE_LOCK |	\
				 RBD_FEATURE_DATA_POOL |	\
				 RBD_FEATURE_OPERATIONS)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)
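/* With the feature bits above, RBD_FEATURES_SUPPORTED evaluates to 0x187. */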
/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 */
#define DEV_NAME_LEN		32

/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These six fields never change for a given rbd image */
	char *object_prefix;
	__u8 obj_order;
	u64 stripe_unit;
	u64 stripe_count;
	s64 data_pool_id;
	u64 features;		/* Might be changeable someday? */

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
};

/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;
	const char	*pool_ns;	/* NULL if default, never "" */

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};

/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};

struct rbd_img_request;

enum obj_request_type {
	OBJ_REQUEST_NODATA = 1,
	OBJ_REQUEST_BIO,	/* pointer into provided bio (list) */
	OBJ_REQUEST_BVECS,	/* pointer into provided bio_vec array */
	OBJ_REQUEST_OWN_BVECS,	/* private bio_vec array, doesn't own pages */
};

enum obj_operation_type {
	OBJ_OP_READ = 1,
	OBJ_OP_WRITE,
	OBJ_OP_DISCARD,
};

/*
 * Writes go through the following state machine to deal with
 * layering:
 *
 *                       need copyup
 * RBD_OBJ_WRITE_GUARD ---------------> RBD_OBJ_WRITE_COPYUP
 *         |     ^                              |
 *         v     \------------------------------/
 *       done
 *         ^
 *         |
 * RBD_OBJ_WRITE_FLAT
 *
 * Writes start in RBD_OBJ_WRITE_GUARD or _FLAT, depending on whether
 * there is a parent or not.
 */
enum rbd_obj_write_state {
	RBD_OBJ_WRITE_FLAT = 1,
	RBD_OBJ_WRITE_GUARD,
	RBD_OBJ_WRITE_COPYUP,
};
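/*
 * Illustration of the state machine above: a guarded write to an object
 * backed only by the parent (e.g. a fresh clone) fails its stat op with
 * -ENOENT, moves GUARD -> COPYUP, reads the parent data in via
 * rbd_obj_read_from_parent() and re-submits as copyup + write via
 * rbd_obj_issue_copyup().  A write to an object that already exists
 * completes directly from RBD_OBJ_WRITE_GUARD.
 */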
struct rbd_obj_request {
	struct ceph_object_extent ex;
	union {
		bool			tried_parent;	/* for reads */
		enum rbd_obj_write_state write_state;	/* for writes */
	};

	struct rbd_img_request	*img_request;
	struct ceph_file_extent	*img_extents;
	u32			num_img_extents;

	union {
		struct ceph_bio_iter	bio_pos;
		struct {
			struct ceph_bvec_iter	bvec_pos;
			u32			bvec_count;
			u32			bvec_idx;
		};
	};
	struct bio_vec		*copyup_bvecs;
	u32			copyup_bvec_count;

	struct ceph_osd_request	*osd_req;

	u64			xferred;	/* bytes transferred */
	int			result;

	struct kref		kref;
};

enum img_req_flags {
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
};

struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	enum obj_operation_type	op_type;
	enum obj_request_type	data_type;
	unsigned long		flags;
	union {
		u64			snap_id;	/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	union {
		struct request		*rq;		/* block request */
		struct rbd_obj_request	*obj_request;	/* obj req initiator */
	};

	spinlock_t		completion_lock;
	u64			xferred;/* aggregate bytes transferred */
	int			result;	/* first nonzero obj_request result */

	struct list_head	object_extents;	/* obj_req.ex structs */
	u32			obj_request_count;
	u32			pending_count;

	struct kref		kref;
};

#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->object_extents, ex.oe_item)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe(oreq, n, &(ireq)->object_extents, ex.oe_item)

enum rbd_watch_state {
	RBD_WATCH_STATE_UNREGISTERED,
	RBD_WATCH_STATE_REGISTERED,
	RBD_WATCH_STATE_ERROR,
};

enum rbd_lock_state {
	RBD_LOCK_STATE_UNLOCKED,
	RBD_LOCK_STATE_LOCKED,
	RBD_LOCK_STATE_RELEASING,
};

/* WatchNotify::ClientId */
struct rbd_client_id {
	u64 gid;
	u64 handle;
};

struct rbd_mapping {
	u64                     size;
	u64                     features;
};

/*
 * a single device
 */
struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	int			minor;
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;
	struct rbd_options	*opts;
	char			*config_info;	/* add{,_single_major} string */

	struct ceph_object_id	header_oid;
	struct ceph_object_locator header_oloc;

	struct ceph_file_layout	layout;		/* used for all rbd requests */

	struct mutex		watch_mutex;
	enum rbd_watch_state	watch_state;
	struct ceph_osd_linger_request *watch_handle;
	u64			watch_cookie;
	struct delayed_work	watch_dwork;

	struct rw_semaphore	lock_rwsem;
	enum rbd_lock_state	lock_state;
	char			lock_cookie[32];
	struct rbd_client_id	owner_cid;
	struct work_struct	acquired_lock_work;
	struct work_struct	released_lock_work;
	struct delayed_work	lock_dwork;
	struct work_struct	unlock_work;
	wait_queue_head_t	lock_waitq;

	struct workqueue_struct	*task_wq;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	atomic_t		parent_ref;
	struct rbd_device	*parent;

	/* Block layer tags. */
	struct blk_mq_tag_set	tag_set;

	/* protects updating the header */
	struct rw_semaphore     header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* sysfs related */
	struct device		dev;
	unsigned long		open_count;	/* protected by lock */
};

/*
 * Flag bits for rbd_dev->flags:
 * - REMOVING (which is coupled with rbd_dev->open_count) is protected
 *   by rbd_dev->lock
 * - BLACKLISTED is protected by rbd_dev->lock_rwsem
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
	RBD_DEV_FLAG_BLACKLISTED, /* our ceph_client is blacklisted */
};

static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);		/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;

static int rbd_major;
static DEFINE_IDA(rbd_dev_id_ida);

static struct workqueue_struct *rbd_wq;

/*
 * single-major requires >= 0.75 version of userspace rbd utility.
 */
static bool single_major = true;
module_param(single_major, bool, 0444);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: true)");

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);
static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
				    size_t count);
static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
				       size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);

static int rbd_dev_id_to_minor(int dev_id)
{
	return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
}

static int minor_to_rbd_dev_id(int minor)
{
	return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
}
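/*
 * For example, with RBD_SINGLE_MAJOR_PART_SHIFT 4, dev_id 3 maps to
 * minor 48, leaving minors 49-63 for partitions of that device.
 */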
static bool __rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
	return rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED ||
	       rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING;
}

static bool rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
	bool is_lock_owner;

	down_read(&rbd_dev->lock_rwsem);
	is_lock_owner = __rbd_is_lock_owner(rbd_dev);
	up_read(&rbd_dev->lock_rwsem);
	return is_lock_owner;
}

static ssize_t rbd_supported_features_show(struct bus_type *bus, char *buf)
{
	return sprintf(buf, "0x%llx\n", RBD_FEATURES_SUPPORTED);
}

static BUS_ATTR(add, 0200, NULL, rbd_add);
static BUS_ATTR(remove, 0200, NULL, rbd_remove);
static BUS_ATTR(add_single_major, 0200, NULL, rbd_add_single_major);
static BUS_ATTR(remove_single_major, 0200, NULL, rbd_remove_single_major);
static BUS_ATTR(supported_features, 0444, rbd_supported_features_show, NULL);

static struct attribute *rbd_bus_attrs[] = {
	&bus_attr_add.attr,
	&bus_attr_remove.attr,
	&bus_attr_add_single_major.attr,
	&bus_attr_remove_single_major.attr,
	&bus_attr_supported_features.attr,
	NULL,
};

static umode_t rbd_bus_is_visible(struct kobject *kobj,
				  struct attribute *attr, int index)
{
	if (!single_major &&
	    (attr == &bus_attr_add_single_major.attr ||
	     attr == &bus_attr_remove_single_major.attr))
		return 0;

	return attr->mode;
}

static const struct attribute_group rbd_bus_group = {
	.attrs = rbd_bus_attrs,
	.is_visible = rbd_bus_is_visible,
};
__ATTRIBUTE_GROUPS(rbd_bus);

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_groups	= rbd_bus_groups,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =    "rbd",
	.release =      rbd_root_dev_release,
};

static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt if we have no name or id */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}

#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */

static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_header_info(struct rbd_device *rbd_dev);
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
		u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features);

static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	(void) get_device(&rbd_dev->dev);

	return 0;
}

static void rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	put_device(&rbd_dev->dev);
}

static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
{
	int ro;

	if (get_user(ro, (int __user *)arg))
		return -EFAULT;

	/* Snapshots can't be marked read-write */
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
		return -EROFS;

	/* Let blkdev_roset() handle it */
	return -ENOTTY;
}

static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	int ret;

	switch (cmd) {
	case BLKROSET:
		ret = rbd_ioctl_set_ro(rbd_dev, arg);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
				unsigned int cmd, unsigned long arg)
{
	return rbd_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */

static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
	.ioctl			= rbd_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= rbd_compat_ioctl,
#endif
};

/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.  Caller holds client_mutex.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	rbdc->client = ceph_create_client(ceph_opts, rbdc);
	if (IS_ERR(rbdc->client))
		goto out_rbdc;
	ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_client;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;
out_client:
	ceph_destroy_client(rbdc->client);
out_rbdc:
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}

static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}

/*
 * (Per device) rbd map options
 */
enum {
	Opt_queue_depth,
	Opt_lock_timeout,
	Opt_last_int,
	/* int args above */
	Opt_pool_ns,
	Opt_last_string,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	Opt_lock_on_read,
	Opt_exclusive,
	Opt_notrim,
	Opt_err
};

static match_table_t rbd_opts_tokens = {
	{Opt_queue_depth, "queue_depth=%d"},
	{Opt_lock_timeout, "lock_timeout=%d"},
	/* int args above */
	{Opt_pool_ns, "_pool_ns=%s"},
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	{Opt_lock_on_read, "lock_on_read"},
	{Opt_exclusive, "exclusive"},
	{Opt_notrim, "notrim"},
	{Opt_err, NULL}
};

struct rbd_options {
	int	queue_depth;
	unsigned long	lock_timeout;
	bool	read_only;
	bool	lock_on_read;
	bool	exclusive;
	bool	trim;
};

#define RBD_QUEUE_DEPTH_DEFAULT	BLKDEV_MAX_RQ
#define RBD_LOCK_TIMEOUT_DEFAULT 0  /* no timeout */
#define RBD_READ_ONLY_DEFAULT	false
#define RBD_LOCK_ON_READ_DEFAULT false
#define RBD_EXCLUSIVE_DEFAULT	false
#define RBD_TRIM_DEFAULT	true
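/*
 * Example (hypothetical values) of the tokens above as they would appear
 * among the comma-separated options of a mapping request:
 *
 *   queue_depth=128,lock_timeout=30,lock_on_read,notrim
 */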
struct parse_rbd_opts_ctx {
	struct rbd_spec		*spec;
	struct rbd_options	*opts;
};

static int parse_rbd_opts_token(char *c, void *private)
{
	struct parse_rbd_opts_ctx *pctx = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);
	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad option arg (not int) at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token, argstr[0].from);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_queue_depth:
		if (intval < 1) {
			pr_err("queue_depth out of range\n");
			return -EINVAL;
		}
		pctx->opts->queue_depth = intval;
		break;
	case Opt_lock_timeout:
		/* 0 is "wait forever" (i.e. infinite timeout) */
		if (intval < 0 || intval > INT_MAX / 1000) {
			pr_err("lock_timeout out of range\n");
			return -EINVAL;
		}
		pctx->opts->lock_timeout = msecs_to_jiffies(intval * 1000);
		break;
	case Opt_pool_ns:
		kfree(pctx->spec->pool_ns);
		pctx->spec->pool_ns = match_strdup(argstr);
		if (!pctx->spec->pool_ns)
			return -ENOMEM;
		break;
	case Opt_read_only:
		pctx->opts->read_only = true;
		break;
	case Opt_read_write:
		pctx->opts->read_only = false;
		break;
	case Opt_lock_on_read:
		pctx->opts->lock_on_read = true;
		break;
	case Opt_exclusive:
		pctx->opts->exclusive = true;
		break;
	case Opt_notrim:
		pctx->opts->trim = false;
		break;
	default:
		/* libceph prints "bad option" msg */
		return -EINVAL;
	}

	return 0;
}

static char* obj_op_name(enum obj_operation_type op_type)
{
	switch (op_type) {
	case OBJ_OP_READ:
		return "read";
	case OBJ_OP_WRITE:
		return "write";
	case OBJ_OP_DISCARD:
		return "discard";
	default:
		return "???";
	}
}

/*
 * Destroy ceph client.  Takes and releases rbd_client_list_lock while
 * unlinking the client from the client list.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}

static int wait_for_latest_osdmap(struct ceph_client *client)
{
	u64 newest_epoch;
	int ret;

	ret = ceph_monc_get_version(&client->monc, "osdmap", &newest_epoch);
	if (ret)
		return ret;

	if (client->osdc.osdmap->epoch >= newest_epoch)
		return 0;

	ceph_osdc_maybe_request_map(&client->osdc);
	return ceph_monc_wait_osdmap(&client->monc, newest_epoch,
				     client->options->mount_timeout);
}

/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret;

	mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
	rbdc = rbd_client_find(ceph_opts);
	if (rbdc) {
		ceph_destroy_options(ceph_opts);

		/*
		 * Using an existing client.  Make sure ->pg_pools is up to
		 * date before we look up the pool id in do_rbd_add().
		 */
		ret = wait_for_latest_osdmap(rbdc->client);
		if (ret) {
			rbd_warn(NULL, "failed to get latest osdmap: %d", ret);
			rbd_put_client(rbdc);
			rbdc = ERR_PTR(ret);
		}
	} else {
		rbdc = rbd_client_create(ceph_opts);
	}
	mutex_unlock(&client_mutex);

	return rbdc;
}

static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}

static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}

/*
 * returns the size of an object in the image
 */
static u32 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1U << header->obj_order;
}
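/* e.g. the default obj_order of 22 yields 1U << 22 = 4 MiB objects */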
static void rbd_init_layout(struct rbd_device *rbd_dev)
{
	if (rbd_dev->header.stripe_unit == 0 ||
	    rbd_dev->header.stripe_count == 0) {
		rbd_dev->header.stripe_unit = rbd_obj_bytes(&rbd_dev->header);
		rbd_dev->header.stripe_count = 1;
	}

	rbd_dev->layout.stripe_unit = rbd_dev->header.stripe_unit;
	rbd_dev->layout.stripe_count = rbd_dev->header.stripe_count;
	rbd_dev->layout.object_size = rbd_obj_bytes(&rbd_dev->header);
	rbd_dev->layout.pool_id = rbd_dev->header.data_pool_id == CEPH_NOPOOL ?
			  rbd_dev->spec->pool_id : rbd_dev->header.data_pool_id;
	RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);
}

/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
				 struct rbd_image_header_ondisk *ondisk)
{
	struct rbd_image_header *header = &rbd_dev->header;
	bool first_time = header->object_prefix == NULL;
	struct ceph_snap_context *snapc;
	char *object_prefix = NULL;
	char *snap_names = NULL;
	u64 *snap_sizes = NULL;
	u32 snap_count;
	int ret = -ENOMEM;
	u32 i;

	/* Allocate this now to avoid having to handle failure below */

	if (first_time) {
		object_prefix = kstrndup(ondisk->object_prefix,
					 sizeof(ondisk->object_prefix),
					 GFP_KERNEL);
		if (!object_prefix)
			return -ENOMEM;
	}

	/* Allocate the snapshot context and fill it in */

	snap_count = le32_to_cpu(ondisk->snap_count);
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc)
		goto out_err;
	snapc->seq = le64_to_cpu(ondisk->snap_seq);
	if (snap_count) {
		struct rbd_image_snap_ondisk *snaps;
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* We'll keep a copy of the snapshot names... */

		if (snap_names_len > (u64)SIZE_MAX)
			goto out_2big;
		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!snap_names)
			goto out_err;

		/* ...as well as the array of their sizes. */
		snap_sizes = kmalloc_array(snap_count,
					   sizeof(*header->snap_sizes),
					   GFP_KERNEL);
		if (!snap_sizes)
			goto out_err;

		/*
		 * Copy the names, and fill in each snapshot's id
		 * and size.
		 *
		 * Note that rbd_dev_v1_header_info() guarantees the
		 * ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, so this memcpy() is safe.
		 */
		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
		snaps = ondisk->snaps;
		for (i = 0; i < snap_count; i++) {
			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
		}
	}

	/* We won't fail any more, fill in the header */

	if (first_time) {
		header->object_prefix = object_prefix;
		header->obj_order = ondisk->options.order;
		rbd_init_layout(rbd_dev);
	} else {
		ceph_put_snap_context(header->snapc);
		kfree(header->snap_names);
		kfree(header->snap_sizes);
	}

	/* The remaining fields always get updated (when we refresh) */

	header->image_size = le64_to_cpu(ondisk->image_size);
	header->snapc = snapc;
	header->snap_names = snap_names;
	header->snap_sizes = snap_sizes;

	return 0;
out_2big:
	ret = -EIO;
out_err:
	kfree(snap_sizes);
	kfree(snap_names);
	ceph_put_snap_context(snapc);
	kfree(object_prefix);

	return ret;
}

static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}
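/*
 * For instance, with snap_names "foo\0bar\0", which == 1 skips past
 * "foo" and its NUL and returns a copy of "bar".
 */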
/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)
		return 1;
	return snap_id1 == snap_id2 ? 0 : -1;
}

/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 *found;

	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);

	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}
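/*
 * For example, with snapc->snaps = { 20, 10, 5 } (descending), looking
 * up snap_id 10 returns index 1; looking up 15 returns BAD_SNAP_INDEX.
 */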
static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	u32 which;
	const char *snap_name;

	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)
		return ERR_PTR(-ENOENT);

	snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
	return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}

static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}

static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_size)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;

		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;

		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
}

static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
			u64 *snap_features)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_features = rbd_dev->header.features;
	} else if (rbd_dev->image_format == 1) {
		*snap_features = 0;	/* No features for format 1 */
	} else {
		u64 features = 0;
		int ret;

		ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
		if (ret)
			return ret;

		*snap_features = features;
	}
	return 0;
}

static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	u64 snap_id = rbd_dev->spec->snap_id;
	u64 size = 0;
	u64 features = 0;
	int ret;

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;
	ret = rbd_snap_features(rbd_dev, snap_id, &features);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	rbd_dev->mapping.features = features;

	return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
}

static void zero_bvec(struct bio_vec *bv)
{
	void *buf;
	unsigned long flags;

	buf = bvec_kmap_irq(bv, &flags);
	memset(buf, 0, bv->bv_len);
	flush_dcache_page(bv->bv_page);
	bvec_kunmap_irq(buf, &flags);
}

static void zero_bios(struct ceph_bio_iter *bio_pos, u32 off, u32 bytes)
{
	struct ceph_bio_iter it = *bio_pos;

	ceph_bio_iter_advance(&it, off);
	ceph_bio_iter_advance_step(&it, bytes, ({
		zero_bvec(&bv);
	}));
}

static void zero_bvecs(struct ceph_bvec_iter *bvec_pos, u32 off, u32 bytes)
{
	struct ceph_bvec_iter it = *bvec_pos;

	ceph_bvec_iter_advance(&it, off);
	ceph_bvec_iter_advance_step(&it, bytes, ({
		zero_bvec(&bv);
	}));
}

/*
 * Zero a range in @obj_req data buffer defined by a bio (list) or
 * (private) bio_vec array.
 *
 * @off is relative to the start of the data buffer.
 */
static void rbd_obj_zero_range(struct rbd_obj_request *obj_req, u32 off,
			       u32 bytes)
{
	switch (obj_req->img_request->data_type) {
	case OBJ_REQUEST_BIO:
		zero_bios(&obj_req->bio_pos, off, bytes);
		break;
	case OBJ_REQUEST_BVECS:
	case OBJ_REQUEST_OWN_BVECS:
		zero_bvecs(&obj_req->bvec_pos, off, bytes);
		break;
	default:
		rbd_assert(0);
	}
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		kref_read(&obj_request->kref));
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static void rbd_img_request_get(struct rbd_img_request *img_request)
{
	dout("%s: img %p (was %d)\n", __func__, img_request,
	     kref_read(&img_request->kref));
	kref_get(&img_request->kref);
}

static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
	rbd_assert(img_request != NULL);
	dout("%s: img %p (was %d)\n", __func__, img_request,
		kref_read(&img_request->kref));
	kref_put(&img_request->kref, rbd_img_request_destroy);
}

static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->img_request == NULL);

	/* Image request now owns object's original reference */
	obj_request->img_request = img_request;
	img_request->obj_request_count++;
	img_request->pending_count++;
	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
	list_del(&obj_request->ex.oe_item);
	rbd_assert(img_request->obj_request_count > 0);
	img_request->obj_request_count--;
	rbd_assert(obj_request->img_request == img_request);
	rbd_obj_request_put(obj_request);
}

static void rbd_obj_request_submit(struct rbd_obj_request *obj_request)
{
	struct ceph_osd_request *osd_req = obj_request->osd_req;

	dout("%s %p object_no %016llx %llu~%llu osd_req %p\n", __func__,
	     obj_request, obj_request->ex.oe_objno, obj_request->ex.oe_off,
	     obj_request->ex.oe_len, osd_req);
	ceph_osdc_start_request(osd_req->r_osdc, osd_req, false);
}

/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_layered_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static void img_request_layered_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}

static bool rbd_obj_is_entire(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;

	return !obj_req->ex.oe_off &&
	       obj_req->ex.oe_len == rbd_dev->layout.object_size;
}

static bool rbd_obj_is_tail(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;

	return obj_req->ex.oe_off + obj_req->ex.oe_len ==
					rbd_dev->layout.object_size;
}

static u64 rbd_obj_img_extents_bytes(struct rbd_obj_request *obj_req)
{
	return ceph_file_extents_bytes(obj_req->img_extents,
				       obj_req->num_img_extents);
}

static bool rbd_img_is_write(struct rbd_img_request *img_req)
{
	switch (img_req->op_type) {
	case OBJ_OP_READ:
		return false;
	case OBJ_OP_WRITE:
	case OBJ_OP_DISCARD:
		return true;
	default:
		BUG();
	}
}

static void rbd_obj_handle_request(struct rbd_obj_request *obj_req);

static void rbd_osd_req_callback(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;

	dout("%s osd_req %p result %d for obj_req %p\n", __func__, osd_req,
	     osd_req->r_result, obj_req);
	rbd_assert(osd_req == obj_req->osd_req);

	obj_req->result = osd_req->r_result < 0 ? osd_req->r_result : 0;
	if (!obj_req->result && !rbd_img_is_write(obj_req->img_request))
		obj_req->xferred = osd_req->r_result;
	else
		/*
		 * Writes aren't allowed to return a data payload.  In some
		 * guarded write cases (e.g. stat + zero on an empty object)
		 * a stat response makes it through, but we don't care.
		 */
		obj_req->xferred = 0;

	rbd_obj_handle_request(obj_req);
}

static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
{
	struct ceph_osd_request *osd_req = obj_request->osd_req;

	osd_req->r_flags = CEPH_OSD_FLAG_READ;
	osd_req->r_snapid = obj_request->img_request->snap_id;
}

static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
	struct ceph_osd_request *osd_req = obj_request->osd_req;

	osd_req->r_flags = CEPH_OSD_FLAG_WRITE;
	ktime_get_real_ts64(&osd_req->r_mtime);
	osd_req->r_data_offset = obj_request->ex.oe_off;
}

static struct ceph_osd_request *
rbd_osd_req_create(struct rbd_obj_request *obj_req, unsigned int num_ops)
{
	struct rbd_img_request *img_req = obj_req->img_request;
	struct rbd_device *rbd_dev = img_req->rbd_dev;
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct ceph_osd_request *req;
	const char *name_format = rbd_dev->image_format == 1 ?
				      RBD_V1_DATA_FORMAT : RBD_V2_DATA_FORMAT;

	req = ceph_osdc_alloc_request(osdc,
			(rbd_img_is_write(img_req) ? img_req->snapc : NULL),
			num_ops, false, GFP_NOIO);
	if (!req)
		return NULL;

	req->r_callback = rbd_osd_req_callback;
	req->r_priv = obj_req;

	/*
	 * Data objects may be stored in a separate pool, but always in
	 * the same namespace in that pool as the header in its pool.
	 */
	ceph_oloc_copy(&req->r_base_oloc, &rbd_dev->header_oloc);
	req->r_base_oloc.pool = rbd_dev->layout.pool_id;

	if (ceph_oid_aprintf(&req->r_base_oid, GFP_NOIO, name_format,
			rbd_dev->header.object_prefix, obj_req->ex.oe_objno))
		goto err_req;

	if (ceph_osdc_alloc_messages(req, GFP_NOIO))
		goto err_req;

	return req;

err_req:
	ceph_osdc_put_request(req);
	return NULL;
}

static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
	ceph_osdc_put_request(osd_req);
}

static struct rbd_obj_request *rbd_obj_request_create(void)
{
	struct rbd_obj_request *obj_request;

	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
	if (!obj_request)
		return NULL;

	ceph_object_extent_init(&obj_request->ex);
	kref_init(&obj_request->kref);

	dout("%s %p\n", __func__, obj_request);
	return obj_request;
}

static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;
	u32 i;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	dout("%s: obj %p\n", __func__, obj_request);

	if (obj_request->osd_req)
		rbd_osd_req_destroy(obj_request->osd_req);

	switch (obj_request->img_request->data_type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_BVECS:
		break;		/* Nothing to do */
	case OBJ_REQUEST_OWN_BVECS:
		kfree(obj_request->bvec_pos.bvecs);
		break;
	default:
		rbd_assert(0);
	}

	kfree(obj_request->img_extents);
	if (obj_request->copyup_bvecs) {
		for (i = 0; i < obj_request->copyup_bvec_count; i++) {
			if (obj_request->copyup_bvecs[i].bv_page)
				__free_page(obj_request->copyup_bvecs[i].bv_page);
		}
		kfree(obj_request->copyup_bvecs);
	}

	kmem_cache_free(rbd_obj_request_cache, obj_request);
}

/* It's OK to call this for a device with no parent */

static void rbd_spec_put(struct rbd_spec *spec);
static void rbd_dev_unparent(struct rbd_device *rbd_dev)
{
	rbd_dev_remove_parent(rbd_dev);
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;
}

/*
 * Parent image reference counting is used to determine when an
 * image's parent fields can be safely torn down--after there are no
 * more in-flight requests to the parent image.  When the last
 * reference is dropped, cleaning them up is safe.
 */
static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return;

	counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
	if (counter > 0)
		return;

	/* Last reference; clean up parent data structures */

	if (!counter)
		rbd_dev_unparent(rbd_dev);
	else
		rbd_warn(rbd_dev, "parent reference underflow");
}

/*
 * If an image has a non-zero parent overlap, get a reference to its
 * parent.
 *
 * Returns true if the rbd device has a parent with a non-zero
 * overlap and a reference for it was successfully taken, or
 * false otherwise.
 */
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
	int counter = 0;

	if (!rbd_dev->parent_spec)
		return false;

	down_read(&rbd_dev->header_rwsem);
	if (rbd_dev->parent_overlap)
		counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
	up_read(&rbd_dev->header_rwsem);

	if (counter < 0)
		rbd_warn(rbd_dev, "parent reference overflow");

	return counter > 0;
}

/*
 * Caller is responsible for filling in the list of object requests
 * that comprises the image request, and the Linux request pointer
 * (if there is one).
 */
static struct rbd_img_request *rbd_img_request_create(
					struct rbd_device *rbd_dev,
					enum obj_operation_type op_type,
					struct ceph_snap_context *snapc)
{
	struct rbd_img_request *img_request;

	img_request = kmem_cache_zalloc(rbd_img_request_cache, GFP_NOIO);
	if (!img_request)
		return NULL;

	img_request->rbd_dev = rbd_dev;
	img_request->op_type = op_type;
	if (!rbd_img_is_write(img_request))
		img_request->snap_id = rbd_dev->spec->snap_id;
	else
		img_request->snapc = snapc;

	if (rbd_dev_parent_get(rbd_dev))
		img_request_layered_set(img_request);

	spin_lock_init(&img_request->completion_lock);
	INIT_LIST_HEAD(&img_request->object_extents);
	kref_init(&img_request->kref);

	dout("%s: rbd_dev %p %s -> img %p\n", __func__, rbd_dev,
	     obj_op_name(op_type), img_request);
	return img_request;
}

static void rbd_img_request_destroy(struct kref *kref)
{
	struct rbd_img_request *img_request;
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	img_request = container_of(kref, struct rbd_img_request, kref);

	dout("%s: img %p\n", __func__, img_request);

	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);
	rbd_assert(img_request->obj_request_count == 0);

	if (img_request_layered_test(img_request)) {
		img_request_layered_clear(img_request);
		rbd_dev_parent_put(img_request->rbd_dev);
	}

	if (rbd_img_is_write(img_request))
		ceph_put_snap_context(img_request->snapc);

	kmem_cache_free(rbd_img_request_cache, img_request);
}

static void prune_extents(struct ceph_file_extent *img_extents,
			  u32 *num_img_extents, u64 overlap)
{
	u32 cnt = *num_img_extents;

	/* drop extents completely beyond the overlap */
	while (cnt && img_extents[cnt - 1].fe_off >= overlap)
		cnt--;

	if (cnt) {
		struct ceph_file_extent *ex = &img_extents[cnt - 1];

		/* trim final overlapping extent */
		if (ex->fe_off + ex->fe_len > overlap)
			ex->fe_len = overlap - ex->fe_off;
	}

	*num_img_extents = cnt;
}
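/*
 * For example, with overlap 4096 and img_extents { {0, 1024},
 * {3072, 2048} }, the second extent is trimmed to 1024 bytes; an
 * extent starting at or beyond offset 4096 would be dropped entirely.
 */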
/*
 * Determine the byte range(s) covered by either just the object extent
 * or the entire object in the parent image.
 */
static int rbd_obj_calc_img_extents(struct rbd_obj_request *obj_req,
				    bool entire)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	int ret;

	if (!rbd_dev->parent_overlap)
		return 0;

	ret = ceph_extent_to_file(&rbd_dev->layout, obj_req->ex.oe_objno,
				  entire ? 0 : obj_req->ex.oe_off,
				  entire ? rbd_dev->layout.object_size :
							obj_req->ex.oe_len,
				  &obj_req->img_extents,
				  &obj_req->num_img_extents);
	if (ret)
		return ret;

	prune_extents(obj_req->img_extents, &obj_req->num_img_extents,
		      rbd_dev->parent_overlap);
	return 0;
}

static void rbd_osd_req_setup_data(struct rbd_obj_request *obj_req, u32 which)
{
	switch (obj_req->img_request->data_type) {
	case OBJ_REQUEST_BIO:
		osd_req_op_extent_osd_data_bio(obj_req->osd_req, which,
					       &obj_req->bio_pos,
					       obj_req->ex.oe_len);
		break;
	case OBJ_REQUEST_BVECS:
	case OBJ_REQUEST_OWN_BVECS:
		rbd_assert(obj_req->bvec_pos.iter.bi_size ==
							obj_req->ex.oe_len);
		rbd_assert(obj_req->bvec_idx == obj_req->bvec_count);
		osd_req_op_extent_osd_data_bvec_pos(obj_req->osd_req, which,
						    &obj_req->bvec_pos);
		break;
	default:
		rbd_assert(0);
	}
}

static int rbd_obj_setup_read(struct rbd_obj_request *obj_req)
{
	obj_req->osd_req = rbd_osd_req_create(obj_req, 1);
	if (!obj_req->osd_req)
		return -ENOMEM;

	osd_req_op_extent_init(obj_req->osd_req, 0, CEPH_OSD_OP_READ,
			       obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0);
	rbd_osd_req_setup_data(obj_req, 0);

	rbd_osd_req_format_read(obj_req);
	return 0;
}

static int __rbd_obj_setup_stat(struct rbd_obj_request *obj_req,
				unsigned int which)
{
	struct page **pages;

	/*
	 * The response data for a STAT call consists of:
	 *     le64 length;
	 *     struct {
	 *         le32 tv_sec;
	 *         le32 tv_nsec;
	 *     } mtime;
	 */
	pages = ceph_alloc_page_vector(1, GFP_NOIO);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	osd_req_op_init(obj_req->osd_req, which, CEPH_OSD_OP_STAT, 0);
	osd_req_op_raw_data_in_pages(obj_req->osd_req, which, pages,
				     8 + sizeof(struct ceph_timespec),
				     0, false, true);
	return 0;
}

static void __rbd_obj_setup_write(struct rbd_obj_request *obj_req,
				  unsigned int which)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	u16 opcode;

	osd_req_op_alloc_hint_init(obj_req->osd_req, which++,
				   rbd_dev->layout.object_size,
				   rbd_dev->layout.object_size);

	if (rbd_obj_is_entire(obj_req))
		opcode = CEPH_OSD_OP_WRITEFULL;
	else
		opcode = CEPH_OSD_OP_WRITE;

	osd_req_op_extent_init(obj_req->osd_req, which, opcode,
			       obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0);
	rbd_osd_req_setup_data(obj_req, which++);

	rbd_assert(which == obj_req->osd_req->r_num_ops);
	rbd_osd_req_format_write(obj_req);
}

static int rbd_obj_setup_write(struct rbd_obj_request *obj_req)
{
	unsigned int num_osd_ops, which = 0;
	int ret;

	/* reverse map the entire object onto the parent */
	ret = rbd_obj_calc_img_extents(obj_req, true);
	if (ret)
		return ret;

	if (obj_req->num_img_extents) {
		obj_req->write_state = RBD_OBJ_WRITE_GUARD;
		num_osd_ops = 3; /* stat + setallochint + write/writefull */
	} else {
		obj_req->write_state = RBD_OBJ_WRITE_FLAT;
		num_osd_ops = 2; /* setallochint + write/writefull */
	}

	obj_req->osd_req = rbd_osd_req_create(obj_req, num_osd_ops);
	if (!obj_req->osd_req)
		return -ENOMEM;

	if (obj_req->num_img_extents) {
		ret = __rbd_obj_setup_stat(obj_req, which++);
		if (ret)
			return ret;
	}

	__rbd_obj_setup_write(obj_req, which);
	return 0;
}

static void __rbd_obj_setup_discard(struct rbd_obj_request *obj_req,
				    unsigned int which)
{
	u16 opcode;

	if (rbd_obj_is_entire(obj_req)) {
		if (obj_req->num_img_extents) {
			osd_req_op_init(obj_req->osd_req, which++,
					CEPH_OSD_OP_CREATE, 0);
			opcode = CEPH_OSD_OP_TRUNCATE;
		} else {
			osd_req_op_init(obj_req->osd_req, which++,
					CEPH_OSD_OP_DELETE, 0);
			opcode = 0;
		}
	} else if (rbd_obj_is_tail(obj_req)) {
		opcode = CEPH_OSD_OP_TRUNCATE;
	} else {
		opcode = CEPH_OSD_OP_ZERO;
	}

	if (opcode)
		osd_req_op_extent_init(obj_req->osd_req, which++, opcode,
				       obj_req->ex.oe_off, obj_req->ex.oe_len,
				       0, 0);

	rbd_assert(which == obj_req->osd_req->r_num_ops);
	rbd_osd_req_format_write(obj_req);
}

static int rbd_obj_setup_discard(struct rbd_obj_request *obj_req)
{
	unsigned int num_osd_ops, which = 0;
	int ret;

	/* reverse map the entire object onto the parent */
	ret = rbd_obj_calc_img_extents(obj_req, true);
	if (ret)
		return ret;

	if (rbd_obj_is_entire(obj_req)) {
		obj_req->write_state = RBD_OBJ_WRITE_FLAT;
		if (obj_req->num_img_extents)
			num_osd_ops = 2; /* create + truncate */
		else
			num_osd_ops = 1; /* delete */
	} else {
		if (obj_req->num_img_extents) {
			obj_req->write_state = RBD_OBJ_WRITE_GUARD;
			num_osd_ops = 2; /* stat + truncate/zero */
		} else {
			obj_req->write_state = RBD_OBJ_WRITE_FLAT;
			num_osd_ops = 1; /* truncate/zero */
		}
	}

	obj_req->osd_req = rbd_osd_req_create(obj_req, num_osd_ops);
	if (!obj_req->osd_req)
		return -ENOMEM;

	if (!rbd_obj_is_entire(obj_req) && obj_req->num_img_extents) {
		ret = __rbd_obj_setup_stat(obj_req, which++);
		if (ret)
			return ret;
	}

	__rbd_obj_setup_discard(obj_req, which);
	return 0;
}

/*
 * For each object request in @img_req, allocate an OSD request, add
 * individual OSD ops and prepare them for submission.  The number of
 * OSD ops depends on op_type and the overlap point (if any).
 */
static int __rbd_img_fill_request(struct rbd_img_request *img_req)
{
	struct rbd_obj_request *obj_req;
	int ret;

	for_each_obj_request(img_req, obj_req) {
		switch (img_req->op_type) {
		case OBJ_OP_READ:
			ret = rbd_obj_setup_read(obj_req);
			break;
		case OBJ_OP_WRITE:
			ret = rbd_obj_setup_write(obj_req);
			break;
		case OBJ_OP_DISCARD:
			ret = rbd_obj_setup_discard(obj_req);
			break;
		default:
			rbd_assert(0);
		}
		if (ret)
			return ret;
	}

	return 0;
}

union rbd_img_fill_iter {
	struct ceph_bio_iter	bio_iter;
	struct ceph_bvec_iter	bvec_iter;
};

struct rbd_img_fill_ctx {
	enum obj_request_type	pos_type;
	union rbd_img_fill_iter	*pos;
	union rbd_img_fill_iter	iter;
	ceph_object_extent_fn_t	set_pos_fn;
	ceph_object_extent_fn_t	count_fn;
	ceph_object_extent_fn_t	copy_fn;
};

static struct ceph_object_extent *alloc_object_extent(void *arg)
{
	struct rbd_img_request *img_req = arg;
	struct rbd_obj_request *obj_req;

	obj_req = rbd_obj_request_create();
	if (!obj_req)
		return NULL;

	rbd_img_obj_request_add(img_req, obj_req);
	return &obj_req->ex;
}

/*
 * While su != os && sc == 1 is technically not fancy (it's the same
 * layout as su == os && sc == 1), we can't use the nocopy path for it
 * because ->set_pos_fn() should be called only once per object.
 * ceph_file_to_extents() invokes action_fn once per stripe unit, so
 * treat su != os && sc == 1 as fancy.
 */
static bool rbd_layout_is_fancy(struct ceph_file_layout *l)
{
	return l->stripe_unit != l->object_size;
}
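/*
 * e.g. su == 1M, os == 4M, sc == 1 counts as fancy here: a full 4M
 * object is visited in four 1M stripe-unit steps, so ->set_pos_fn()
 * would otherwise fire four times for a single object.
 */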
1992 static int rbd_img_fill_request_nocopy(struct rbd_img_request *img_req,
1993 struct ceph_file_extent *img_extents,
1994 u32 num_img_extents,
1995 struct rbd_img_fill_ctx *fctx)
2000 img_req->data_type = fctx->pos_type;
2003 * Create object requests and set each object request's starting
2004 * position in the provided bio (list) or bio_vec array.
2006 fctx->iter = *fctx->pos;
2007 for (i = 0; i < num_img_extents; i++) {
2008 ret = ceph_file_to_extents(&img_req->rbd_dev->layout,
2009 img_extents[i].fe_off,
2010 img_extents[i].fe_len,
2011 &img_req->object_extents,
2012 alloc_object_extent, img_req,
2013 fctx->set_pos_fn, &fctx->iter);
2018 return __rbd_img_fill_request(img_req);
2022 * Map a list of image extents to a list of object extents, create the
2023 * corresponding object requests (normally each to a different object,
2024 * but not always) and add them to @img_req. For each object request,
2025 * set up its data descriptor to point to the corresponding chunk(s) of
2026 * @fctx->pos data buffer.
2028 * Because ceph_file_to_extents() will merge adjacent object extents
2029 * together, each object request's data descriptor may point to multiple
2030 * different chunks of @fctx->pos data buffer.
2032 * @fctx->pos data buffer is assumed to be large enough.
2034 static int rbd_img_fill_request(struct rbd_img_request *img_req,
2035 struct ceph_file_extent *img_extents,
2036 u32 num_img_extents,
2037 struct rbd_img_fill_ctx *fctx)
2039 struct rbd_device *rbd_dev = img_req->rbd_dev;
2040 struct rbd_obj_request *obj_req;
2044 if (fctx->pos_type == OBJ_REQUEST_NODATA ||
2045 !rbd_layout_is_fancy(&rbd_dev->layout))
2046 return rbd_img_fill_request_nocopy(img_req, img_extents,
2047 num_img_extents, fctx);
2049 img_req->data_type = OBJ_REQUEST_OWN_BVECS;
2052 * Create object requests and determine ->bvec_count for each object
2053 * request. Note that ->bvec_count sum over all object requests may
2054 * be greater than the number of bio_vecs in the provided bio (list)
2055 * or bio_vec array because when mapped, those bio_vecs can straddle
2056 * stripe unit boundaries.
2058 fctx->iter = *fctx->pos;
2059 for (i = 0; i < num_img_extents; i++) {
2060 ret = ceph_file_to_extents(&rbd_dev->layout,
2061 img_extents[i].fe_off,
2062 img_extents[i].fe_len,
2063 &img_req->object_extents,
2064 alloc_object_extent, img_req,
2065 fctx->count_fn, &fctx->iter);
2070 for_each_obj_request(img_req, obj_req) {
2071 obj_req->bvec_pos.bvecs = kmalloc_array(obj_req->bvec_count,
2072 sizeof(*obj_req->bvec_pos.bvecs),
2074 if (!obj_req->bvec_pos.bvecs)
2079 * Fill in each object request's private bio_vec array, splitting and
2080 * rearranging the provided bio_vecs in stripe unit chunks as needed.
2082 fctx->iter = *fctx->pos;
2083 for (i = 0; i < num_img_extents; i++) {
2084 ret = ceph_iterate_extents(&rbd_dev->layout,
2085 img_extents[i].fe_off,
2086 img_extents[i].fe_len,
2087 &img_req->object_extents,
2088 fctx->copy_fn, &fctx->iter);
2093 return __rbd_img_fill_request(img_req);
2096 static int rbd_img_fill_nodata(struct rbd_img_request *img_req,
2099 struct ceph_file_extent ex = { off, len };
2100 union rbd_img_fill_iter dummy;
2101 struct rbd_img_fill_ctx fctx = {
2102 .pos_type = OBJ_REQUEST_NODATA,
2106 return rbd_img_fill_request(img_req, &ex, 1, &fctx);
2109 static void set_bio_pos(struct ceph_object_extent *ex, u32 bytes, void *arg)
2111 struct rbd_obj_request *obj_req =
2112 container_of(ex, struct rbd_obj_request, ex);
2113 struct ceph_bio_iter *it = arg;
2115 dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
2116 obj_req->bio_pos = *it;
2117 ceph_bio_iter_advance(it, bytes);
2120 static void count_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2122 struct rbd_obj_request *obj_req =
2123 container_of(ex, struct rbd_obj_request, ex);
2124 struct ceph_bio_iter *it = arg;
2126 dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
2127 ceph_bio_iter_advance_step(it, bytes, ({
2128 obj_req->bvec_count++;
2133 static void copy_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2135 struct rbd_obj_request *obj_req =
2136 container_of(ex, struct rbd_obj_request, ex);
2137 struct ceph_bio_iter *it = arg;
2139 dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
2140 ceph_bio_iter_advance_step(it, bytes, ({
2141 obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
2142 obj_req->bvec_pos.iter.bi_size += bv.bv_len;
2146 static int __rbd_img_fill_from_bio(struct rbd_img_request *img_req,
2147 struct ceph_file_extent *img_extents,
2148 u32 num_img_extents,
2149 struct ceph_bio_iter *bio_pos)
2151 struct rbd_img_fill_ctx fctx = {
2152 .pos_type = OBJ_REQUEST_BIO,
2153 .pos = (union rbd_img_fill_iter *)bio_pos,
2154 .set_pos_fn = set_bio_pos,
2155 .count_fn = count_bio_bvecs,
2156 .copy_fn = copy_bio_bvecs,
2159 return rbd_img_fill_request(img_req, img_extents, num_img_extents,
2163 static int rbd_img_fill_from_bio(struct rbd_img_request *img_req,
2164 u64 off, u64 len, struct bio *bio)
2166 struct ceph_file_extent ex = { off, len };
2167 struct ceph_bio_iter it = { .bio = bio, .iter = bio->bi_iter };
2169 return __rbd_img_fill_from_bio(img_req, &ex, 1, &it);
2172 static void set_bvec_pos(struct ceph_object_extent *ex, u32 bytes, void *arg)
2174 struct rbd_obj_request *obj_req =
2175 container_of(ex, struct rbd_obj_request, ex);
2176 struct ceph_bvec_iter *it = arg;
2178 obj_req->bvec_pos = *it;
2179 ceph_bvec_iter_shorten(&obj_req->bvec_pos, bytes);
2180 ceph_bvec_iter_advance(it, bytes);
2183 static void count_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2185 struct rbd_obj_request *obj_req =
2186 container_of(ex, struct rbd_obj_request, ex);
2187 struct ceph_bvec_iter *it = arg;
2189 ceph_bvec_iter_advance_step(it, bytes, ({
2190 obj_req->bvec_count++;
2194 static void copy_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2196 struct rbd_obj_request *obj_req =
2197 container_of(ex, struct rbd_obj_request, ex);
2198 struct ceph_bvec_iter *it = arg;
2200 ceph_bvec_iter_advance_step(it, bytes, ({
2201 obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
2202 obj_req->bvec_pos.iter.bi_size += bv.bv_len;
2206 static int __rbd_img_fill_from_bvecs(struct rbd_img_request *img_req,
2207 struct ceph_file_extent *img_extents,
2208 u32 num_img_extents,
2209 struct ceph_bvec_iter *bvec_pos)
2211 struct rbd_img_fill_ctx fctx = {
2212 .pos_type = OBJ_REQUEST_BVECS,
2213 .pos = (union rbd_img_fill_iter *)bvec_pos,
2214 .set_pos_fn = set_bvec_pos,
2215 .count_fn = count_bvecs,
2216 .copy_fn = copy_bvecs,
2219 return rbd_img_fill_request(img_req, img_extents, num_img_extents,
2223 static int rbd_img_fill_from_bvecs(struct rbd_img_request *img_req,
2224 struct ceph_file_extent *img_extents,
2225 u32 num_img_extents,
2226 struct bio_vec *bvecs)
2228 struct ceph_bvec_iter it = {
2230 .iter = { .bi_size = ceph_file_extents_bytes(img_extents,
2234 return __rbd_img_fill_from_bvecs(img_req, img_extents, num_img_extents,
2238 static void rbd_img_request_submit(struct rbd_img_request *img_request)
2240 struct rbd_obj_request *obj_request;
2242 dout("%s: img %p\n", __func__, img_request);
2244 rbd_img_request_get(img_request);
2245 for_each_obj_request(img_request, obj_request)
2246 rbd_obj_request_submit(obj_request);
2248 rbd_img_request_put(img_request);
2251 static int rbd_obj_read_from_parent(struct rbd_obj_request *obj_req)
2253 struct rbd_img_request *img_req = obj_req->img_request;
2254 struct rbd_img_request *child_img_req;
2257 child_img_req = rbd_img_request_create(img_req->rbd_dev->parent,
2262 __set_bit(IMG_REQ_CHILD, &child_img_req->flags);
2263 child_img_req->obj_request = obj_req;
2265 if (!rbd_img_is_write(img_req)) {
2266 switch (img_req->data_type) {
2267 case OBJ_REQUEST_BIO:
2268 ret = __rbd_img_fill_from_bio(child_img_req,
2269 obj_req->img_extents,
2270 obj_req->num_img_extents,
2273 case OBJ_REQUEST_BVECS:
2274 case OBJ_REQUEST_OWN_BVECS:
2275 ret = __rbd_img_fill_from_bvecs(child_img_req,
2276 obj_req->img_extents,
2277 obj_req->num_img_extents,
2278 &obj_req->bvec_pos);
2284 ret = rbd_img_fill_from_bvecs(child_img_req,
2285 obj_req->img_extents,
2286 obj_req->num_img_extents,
2287 obj_req->copyup_bvecs);
2290 rbd_img_request_put(child_img_req);
2294 rbd_img_request_submit(child_img_req);
2298 static bool rbd_obj_handle_read(struct rbd_obj_request *obj_req)
2300 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2303 if (obj_req->result == -ENOENT &&
2304 rbd_dev->parent_overlap && !obj_req->tried_parent) {
2305 /* reverse map this object extent onto the parent */
2306 ret = rbd_obj_calc_img_extents(obj_req, false);
2308 obj_req->result = ret;
2312 if (obj_req->num_img_extents) {
2313 obj_req->tried_parent = true;
2314 ret = rbd_obj_read_from_parent(obj_req);
2316 obj_req->result = ret;
2324 * -ENOENT means a hole in the image -- zero-fill the entire
2325 * length of the request. A short read also implies zero-fill
2326 * to the end of the request. In both cases we update xferred
2327 * count to indicate the whole request was satisfied.
2329 if (obj_req->result == -ENOENT ||
2330 (!obj_req->result && obj_req->xferred < obj_req->ex.oe_len)) {
2331 rbd_assert(!obj_req->xferred || !obj_req->result);
2332 rbd_obj_zero_range(obj_req, obj_req->xferred,
2333 obj_req->ex.oe_len - obj_req->xferred);
2334 obj_req->result = 0;
2335 obj_req->xferred = obj_req->ex.oe_len;
2342 * copyup_bvecs pages are never highmem pages
2344 static bool is_zero_bvecs(struct bio_vec *bvecs, u32 bytes)
2346 struct ceph_bvec_iter it = {
2348 .iter = { .bi_size = bytes },
2351 ceph_bvec_iter_advance_step(&it, bytes, ({
2352 if (memchr_inv(page_address(bv.bv_page) + bv.bv_offset, 0,
2359 static int rbd_obj_issue_copyup(struct rbd_obj_request *obj_req, u32 bytes)
2361 unsigned int num_osd_ops = obj_req->osd_req->r_num_ops;
2364 dout("%s obj_req %p bytes %u\n", __func__, obj_req, bytes);
2365 rbd_assert(obj_req->osd_req->r_ops[0].op == CEPH_OSD_OP_STAT);
2366 rbd_osd_req_destroy(obj_req->osd_req);
2369 * Create a copyup request with the same number of OSD ops as
2370 * the original request. The original request was stat + op(s),
2371 * the new copyup request will be copyup + the same op(s).
2373 obj_req->osd_req = rbd_osd_req_create(obj_req, num_osd_ops);
2374 if (!obj_req->osd_req)
2377 ret = osd_req_op_cls_init(obj_req->osd_req, 0, "rbd", "copyup");
2382 * Only send non-zero copyup data to save some I/O and network
2383 * bandwidth -- zero copyup data is equivalent to the object not
* existing.
2386 if (is_zero_bvecs(obj_req->copyup_bvecs, bytes)) {
2387 dout("%s obj_req %p detected zeroes\n", __func__, obj_req);
2390 osd_req_op_cls_request_data_bvecs(obj_req->osd_req, 0,
2391 obj_req->copyup_bvecs,
2392 obj_req->copyup_bvec_count,
2395 switch (obj_req->img_request->op_type) {
2397 __rbd_obj_setup_write(obj_req, 1);
2399 case OBJ_OP_DISCARD:
2400 rbd_assert(!rbd_obj_is_entire(obj_req));
2401 __rbd_obj_setup_discard(obj_req, 1);
2407 rbd_obj_request_submit(obj_req);
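/*
 * Allocate a page vector spanning @obj_overlap bytes to receive the
 * parent data for a later copyup.
 */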
2411 static int setup_copyup_bvecs(struct rbd_obj_request *obj_req, u64 obj_overlap)
2415 rbd_assert(!obj_req->copyup_bvecs);
2416 obj_req->copyup_bvec_count = calc_pages_for(0, obj_overlap);
2417 obj_req->copyup_bvecs = kcalloc(obj_req->copyup_bvec_count,
2418 sizeof(*obj_req->copyup_bvecs),
2420 if (!obj_req->copyup_bvecs)
2423 for (i = 0; i < obj_req->copyup_bvec_count; i++) {
2424 unsigned int len = min(obj_overlap, (u64)PAGE_SIZE);
2426 obj_req->copyup_bvecs[i].bv_page = alloc_page(GFP_NOIO);
2427 if (!obj_req->copyup_bvecs[i].bv_page)
2430 obj_req->copyup_bvecs[i].bv_offset = 0;
2431 obj_req->copyup_bvecs[i].bv_len = len;
2435 rbd_assert(!obj_overlap);
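/*
 * The guarded write hit a non-existent object. If the parent
 * overlap has vanished (image flattened), resubmit as a null
 * copyup; otherwise read the overlapping extents from the parent
 * in preparation for a copyup.
 */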
2439 static int rbd_obj_handle_write_guard(struct rbd_obj_request *obj_req)
2441 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2444 rbd_assert(obj_req->num_img_extents);
2445 prune_extents(obj_req->img_extents, &obj_req->num_img_extents,
2446 rbd_dev->parent_overlap);
2447 if (!obj_req->num_img_extents) {
2449 * The overlap has become 0 (most likely because the
2450 * image has been flattened). Use rbd_obj_issue_copyup()
2451 * to re-submit the original write request -- the copyup
2452 * operation itself will be a no-op, since someone must
2453 * have populated the child object while we weren't
2454 * looking. Move to WRITE_FLAT state as we'll be done
2455 * with the operation once the null copyup completes.
2457 obj_req->write_state = RBD_OBJ_WRITE_FLAT;
2458 return rbd_obj_issue_copyup(obj_req, 0);
2461 ret = setup_copyup_bvecs(obj_req, rbd_obj_img_extents_bytes(obj_req));
2465 obj_req->write_state = RBD_OBJ_WRITE_COPYUP;
2466 return rbd_obj_read_from_parent(obj_req);
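/*
 * Advance a completed write object request through the
 * GUARD/FLAT/COPYUP state machine. Returns true if the request is
 * done, false if it was resubmitted.
 */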
2469 static bool rbd_obj_handle_write(struct rbd_obj_request *obj_req)
2474 switch (obj_req->write_state) {
2475 case RBD_OBJ_WRITE_GUARD:
2476 rbd_assert(!obj_req->xferred);
2477 if (obj_req->result == -ENOENT) {
2479 * The target object doesn't exist. Read the data for
2480 * the entire target object up to the overlap point (if
2481 * any) from the parent, so we can use it for a copyup.
2483 ret = rbd_obj_handle_write_guard(obj_req);
2485 obj_req->result = ret;
2491 case RBD_OBJ_WRITE_FLAT:
2492 if (!obj_req->result)
2494 * There is no such thing as a successful short
2495 * write -- indicate the whole request was satisfied.
2497 obj_req->xferred = obj_req->ex.oe_len;
2499 case RBD_OBJ_WRITE_COPYUP:
2500 obj_req->write_state = RBD_OBJ_WRITE_GUARD;
2501 if (obj_req->result)
2504 rbd_assert(obj_req->xferred);
2505 ret = rbd_obj_issue_copyup(obj_req, obj_req->xferred);
2507 obj_req->result = ret;
2517 * Returns true if @obj_req is completed, or false otherwise.
2519 static bool __rbd_obj_handle_request(struct rbd_obj_request *obj_req)
2521 switch (obj_req->img_request->op_type) {
2523 return rbd_obj_handle_read(obj_req);
2525 return rbd_obj_handle_write(obj_req);
2526 case OBJ_OP_DISCARD:
2527 if (rbd_obj_handle_write(obj_req)) {
2529 * Hide -ENOENT from delete/truncate/zero -- discarding
2530 * a non-existent object is not a problem.
2532 if (obj_req->result == -ENOENT) {
2533 obj_req->result = 0;
2534 obj_req->xferred = obj_req->ex.oe_len;
2544 static void rbd_obj_end_request(struct rbd_obj_request *obj_req)
2546 struct rbd_img_request *img_req = obj_req->img_request;
2548 rbd_assert((!obj_req->result &&
2549 obj_req->xferred == obj_req->ex.oe_len) ||
2550 (obj_req->result < 0 && !obj_req->xferred));
2551 if (!obj_req->result) {
2552 img_req->xferred += obj_req->xferred;
2556 rbd_warn(img_req->rbd_dev,
2557 "%s at objno %llu %llu~%llu result %d xferred %llu",
2558 obj_op_name(img_req->op_type), obj_req->ex.oe_objno,
2559 obj_req->ex.oe_off, obj_req->ex.oe_len, obj_req->result, obj_req->xferred);
2561 if (!img_req->result) {
2562 img_req->result = obj_req->result;
2563 img_req->xferred = 0;
2567 static void rbd_img_end_child_request(struct rbd_img_request *img_req)
2569 struct rbd_obj_request *obj_req = img_req->obj_request;
2571 rbd_assert(test_bit(IMG_REQ_CHILD, &img_req->flags));
2572 rbd_assert((!img_req->result &&
2573 img_req->xferred == rbd_obj_img_extents_bytes(obj_req)) ||
2574 (img_req->result < 0 && !img_req->xferred));
2576 obj_req->result = img_req->result;
2577 obj_req->xferred = img_req->xferred;
2578 rbd_img_request_put(img_req);
2581 static void rbd_img_end_request(struct rbd_img_request *img_req)
2583 rbd_assert(!test_bit(IMG_REQ_CHILD, &img_req->flags));
2584 rbd_assert((!img_req->result &&
2585 img_req->xferred == blk_rq_bytes(img_req->rq)) ||
2586 (img_req->result < 0 && !img_req->xferred));
2588 blk_mq_end_request(img_req->rq,
2589 errno_to_blk_status(img_req->result));
2590 rbd_img_request_put(img_req);
2593 static void rbd_obj_handle_request(struct rbd_obj_request *obj_req)
2595 struct rbd_img_request *img_req;
2598 if (!__rbd_obj_handle_request(obj_req))
2601 img_req = obj_req->img_request;
2602 spin_lock(&img_req->completion_lock);
2603 rbd_obj_end_request(obj_req);
2604 rbd_assert(img_req->pending_count);
2605 if (--img_req->pending_count) {
2606 spin_unlock(&img_req->completion_lock);
2610 spin_unlock(&img_req->completion_lock);
2611 if (test_bit(IMG_REQ_CHILD, &img_req->flags)) {
2612 obj_req = img_req->obj_request;
2613 rbd_img_end_child_request(img_req);
2616 rbd_img_end_request(img_req);
2619 static const struct rbd_client_id rbd_empty_cid;
2621 static bool rbd_cid_equal(const struct rbd_client_id *lhs,
2622 const struct rbd_client_id *rhs)
2624 return lhs->gid == rhs->gid && lhs->handle == rhs->handle;
2627 static struct rbd_client_id rbd_get_cid(struct rbd_device *rbd_dev)
2629 struct rbd_client_id cid;
2631 mutex_lock(&rbd_dev->watch_mutex);
2632 cid.gid = ceph_client_gid(rbd_dev->rbd_client->client);
2633 cid.handle = rbd_dev->watch_cookie;
2634 mutex_unlock(&rbd_dev->watch_mutex);
2639 * lock_rwsem must be held for write
2641 static void rbd_set_owner_cid(struct rbd_device *rbd_dev,
2642 const struct rbd_client_id *cid)
2644 dout("%s rbd_dev %p %llu-%llu -> %llu-%llu\n", __func__, rbd_dev,
2645 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle,
2646 cid->gid, cid->handle);
2647 rbd_dev->owner_cid = *cid; /* struct */
2650 static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf)
2652 mutex_lock(&rbd_dev->watch_mutex);
2653 sprintf(buf, "%s %llu", RBD_LOCK_COOKIE_PREFIX, rbd_dev->watch_cookie);
2654 mutex_unlock(&rbd_dev->watch_mutex);
2657 static void __rbd_lock(struct rbd_device *rbd_dev, const char *cookie)
2659 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
2661 strcpy(rbd_dev->lock_cookie, cookie);
2662 rbd_set_owner_cid(rbd_dev, &cid);
2663 queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
2667 * lock_rwsem must be held for write
2669 static int rbd_lock(struct rbd_device *rbd_dev)
2671 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2675 WARN_ON(__rbd_is_lock_owner(rbd_dev) ||
2676 rbd_dev->lock_cookie[0] != '\0');
2678 format_lock_cookie(rbd_dev, cookie);
2679 ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
2680 RBD_LOCK_NAME, CEPH_CLS_LOCK_EXCLUSIVE, cookie,
2681 RBD_LOCK_TAG, "", 0);
2685 rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED;
2686 __rbd_lock(rbd_dev, cookie);
2691 * lock_rwsem must be held for write
2693 static void rbd_unlock(struct rbd_device *rbd_dev)
2695 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2698 WARN_ON(!__rbd_is_lock_owner(rbd_dev) ||
2699 rbd_dev->lock_cookie[0] == '\0');
2701 ret = ceph_cls_unlock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
2702 RBD_LOCK_NAME, rbd_dev->lock_cookie);
2703 if (ret && ret != -ENOENT)
2704 rbd_warn(rbd_dev, "failed to unlock: %d", ret);
2706 /* treat errors as the image is unlocked */
2707 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
2708 rbd_dev->lock_cookie[0] = '\0';
2709 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
2710 queue_work(rbd_dev->task_wq, &rbd_dev->released_lock_work);
2713 static int __rbd_notify_op_lock(struct rbd_device *rbd_dev,
2714 enum rbd_notify_op notify_op,
2715 struct page ***preply_pages,
2718 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2719 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
2720 char buf[4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN];
2721 int buf_size = sizeof(buf);
2724 dout("%s rbd_dev %p notify_op %d\n", __func__, rbd_dev, notify_op);
2726 /* encode *LockPayload NotifyMessage (op + ClientId) */
2727 ceph_start_encoding(&p, 2, 1, buf_size - CEPH_ENCODING_START_BLK_LEN);
2728 ceph_encode_32(&p, notify_op);
2729 ceph_encode_64(&p, cid.gid);
2730 ceph_encode_64(&p, cid.handle);
2732 return ceph_osdc_notify(osdc, &rbd_dev->header_oid,
2733 &rbd_dev->header_oloc, buf, buf_size,
2734 RBD_NOTIFY_TIMEOUT, preply_pages, preply_len);
2737 static void rbd_notify_op_lock(struct rbd_device *rbd_dev,
2738 enum rbd_notify_op notify_op)
2740 struct page **reply_pages;
2743 __rbd_notify_op_lock(rbd_dev, notify_op, &reply_pages, &reply_len);
2744 ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
2747 static void rbd_notify_acquired_lock(struct work_struct *work)
2749 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
2750 acquired_lock_work);
2752 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_ACQUIRED_LOCK);
2755 static void rbd_notify_released_lock(struct work_struct *work)
2757 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
2758 released_lock_work);
2760 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_RELEASED_LOCK);
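/*
 * Ask the current lock owner to release the lock. Returns 0 if the
 * owner acked, -ETIMEDOUT if no owner responded, or another
 * negative error code (e.g. -EROFS if the peer refuses to release).
 */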
2763 static int rbd_request_lock(struct rbd_device *rbd_dev)
2765 struct page **reply_pages;
2767 bool lock_owner_responded = false;
2770 dout("%s rbd_dev %p\n", __func__, rbd_dev);
2772 ret = __rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_REQUEST_LOCK,
2773 &reply_pages, &reply_len);
2774 if (ret && ret != -ETIMEDOUT) {
2775 rbd_warn(rbd_dev, "failed to request lock: %d", ret);
2779 if (reply_len > 0 && reply_len <= PAGE_SIZE) {
2780 void *p = page_address(reply_pages[0]);
2781 void *const end = p + reply_len;
2784 ceph_decode_32_safe(&p, end, n, e_inval); /* num_acks */
2789 ceph_decode_need(&p, end, 8 + 8, e_inval);
2790 p += 8 + 8; /* skip gid and cookie */
2792 ceph_decode_32_safe(&p, end, len, e_inval);
2796 if (lock_owner_responded) {
2798 "duplicate lock owners detected");
2803 lock_owner_responded = true;
2804 ret = ceph_start_decoding(&p, end, 1, "ResponseMessage",
2808 "failed to decode ResponseMessage: %d",
2813 ret = ceph_decode_32(&p);
2817 if (!lock_owner_responded) {
2818 rbd_warn(rbd_dev, "no lock owners detected");
2823 ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
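/*
 * Wake rbd_wait_state_locked() waiters -- all of them if @wake_all
 * (e.g. on blacklisting), otherwise just one.
 */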
2831 static void wake_requests(struct rbd_device *rbd_dev, bool wake_all)
2833 dout("%s rbd_dev %p wake_all %d\n", __func__, rbd_dev, wake_all);
2835 cancel_delayed_work(&rbd_dev->lock_dwork);
2837 wake_up_all(&rbd_dev->lock_waitq);
2839 wake_up(&rbd_dev->lock_waitq);
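/*
 * Query the OSD for the current holder of the exclusive lock. A
 * lock held via an external (non-krbd) mechanism or a shared lock
 * is warned about and treated as an error.
 */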
2842 static int get_lock_owner_info(struct rbd_device *rbd_dev,
2843 struct ceph_locker **lockers, u32 *num_lockers)
2845 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2850 dout("%s rbd_dev %p\n", __func__, rbd_dev);
2852 ret = ceph_cls_lock_info(osdc, &rbd_dev->header_oid,
2853 &rbd_dev->header_oloc, RBD_LOCK_NAME,
2854 &lock_type, &lock_tag, lockers, num_lockers);
2858 if (*num_lockers == 0) {
2859 dout("%s rbd_dev %p no lockers detected\n", __func__, rbd_dev);
2863 if (strcmp(lock_tag, RBD_LOCK_TAG)) {
2864 rbd_warn(rbd_dev, "locked by external mechanism, tag %s",
2870 if (lock_type == CEPH_CLS_LOCK_SHARED) {
2871 rbd_warn(rbd_dev, "shared lock type detected");
2876 if (strncmp((*lockers)[0].id.cookie, RBD_LOCK_COOKIE_PREFIX,
2877 strlen(RBD_LOCK_COOKIE_PREFIX))) {
2878 rbd_warn(rbd_dev, "locked by external mechanism, cookie %s",
2879 (*lockers)[0].id.cookie);
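/*
 * Check whether @locker still has a watch established on the header
 * object, i.e. whether it is still alive. If so, record its client
 * id as the owner.
 */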
2889 static int find_watcher(struct rbd_device *rbd_dev,
2890 const struct ceph_locker *locker)
2892 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2893 struct ceph_watch_item *watchers;
2899 ret = ceph_osdc_list_watchers(osdc, &rbd_dev->header_oid,
2900 &rbd_dev->header_oloc, &watchers,
2905 sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie);
2906 for (i = 0; i < num_watchers; i++) {
2907 if (!memcmp(&watchers[i].addr, &locker->info.addr,
2908 sizeof(locker->info.addr)) &&
2909 watchers[i].cookie == cookie) {
2910 struct rbd_client_id cid = {
2911 .gid = le64_to_cpu(watchers[i].name.num),
2915 dout("%s rbd_dev %p found cid %llu-%llu\n", __func__,
2916 rbd_dev, cid.gid, cid.handle);
2917 rbd_set_owner_cid(rbd_dev, &cid);
2923 dout("%s rbd_dev %p no watchers\n", __func__, rbd_dev);
2931 * lock_rwsem must be held for write
2933 static int rbd_try_lock(struct rbd_device *rbd_dev)
2935 struct ceph_client *client = rbd_dev->rbd_client->client;
2936 struct ceph_locker *lockers;
2941 ret = rbd_lock(rbd_dev);
2945 /* determine if the current lock holder is still alive */
2946 ret = get_lock_owner_info(rbd_dev, &lockers, &num_lockers);
2950 if (num_lockers == 0)
2953 ret = find_watcher(rbd_dev, lockers);
2956 ret = 0; /* have to request lock */
2960 rbd_warn(rbd_dev, "%s%llu seems dead, breaking lock",
2961 ENTITY_NAME(lockers[0].id.name));
2963 ret = ceph_monc_blacklist_add(&client->monc,
2964 &lockers[0].info.addr);
2966 rbd_warn(rbd_dev, "blacklist of %s%llu failed: %d",
2967 ENTITY_NAME(lockers[0].id.name), ret);
2971 ret = ceph_cls_break_lock(&client->osdc, &rbd_dev->header_oid,
2972 &rbd_dev->header_oloc, RBD_LOCK_NAME,
2973 lockers[0].id.cookie,
2974 &lockers[0].id.name);
2975 if (ret && ret != -ENOENT)
2979 ceph_free_lockers(lockers, num_lockers);
2983 ceph_free_lockers(lockers, num_lockers);
2988 * ret is set only if lock_state is RBD_LOCK_STATE_UNLOCKED
2990 static enum rbd_lock_state rbd_try_acquire_lock(struct rbd_device *rbd_dev,
2993 enum rbd_lock_state lock_state;
2995 down_read(&rbd_dev->lock_rwsem);
2996 dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
2997 rbd_dev->lock_state);
2998 if (__rbd_is_lock_owner(rbd_dev)) {
2999 lock_state = rbd_dev->lock_state;
3000 up_read(&rbd_dev->lock_rwsem);
3004 up_read(&rbd_dev->lock_rwsem);
3005 down_write(&rbd_dev->lock_rwsem);
3006 dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
3007 rbd_dev->lock_state);
3008 if (!__rbd_is_lock_owner(rbd_dev)) {
3009 *pret = rbd_try_lock(rbd_dev);
3011 rbd_warn(rbd_dev, "failed to acquire lock: %d", *pret);
3014 lock_state = rbd_dev->lock_state;
3015 up_write(&rbd_dev->lock_rwsem);
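/*
 * Worker that attempts to acquire the exclusive lock, nudging the
 * current owner and requeueing itself until it succeeds or the
 * client is blacklisted.
 */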
3019 static void rbd_acquire_lock(struct work_struct *work)
3021 struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
3022 struct rbd_device, lock_dwork);
3023 enum rbd_lock_state lock_state;
3026 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3028 lock_state = rbd_try_acquire_lock(rbd_dev, &ret);
3029 if (lock_state != RBD_LOCK_STATE_UNLOCKED || ret == -EBLACKLISTED) {
3030 if (lock_state == RBD_LOCK_STATE_LOCKED)
3031 wake_requests(rbd_dev, true);
3032 dout("%s rbd_dev %p lock_state %d ret %d - done\n", __func__,
3033 rbd_dev, lock_state, ret);
3037 ret = rbd_request_lock(rbd_dev);
3038 if (ret == -ETIMEDOUT) {
3039 goto again; /* treat this as a dead client */
3040 } else if (ret == -EROFS) {
3041 rbd_warn(rbd_dev, "peer will not release lock");
3043 * If this is rbd_add_acquire_lock(), we want to fail
3044 * immediately -- reuse BLACKLISTED flag. Otherwise we
* want to block.
3047 if (!(rbd_dev->disk->flags & GENHD_FL_UP)) {
3048 set_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags);
3049 /* wake "rbd map --exclusive" process */
3050 wake_requests(rbd_dev, false);
3052 } else if (ret < 0) {
3053 rbd_warn(rbd_dev, "error requesting lock: %d", ret);
3054 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
3058 * lock owner acked, but resend if we don't see them
* release the lock
3061 dout("%s rbd_dev %p requeueing lock_dwork\n", __func__,
3063 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
3064 msecs_to_jiffies(2 * RBD_NOTIFY_TIMEOUT * MSEC_PER_SEC));
3069 * lock_rwsem must be held for write
3071 static bool rbd_release_lock(struct rbd_device *rbd_dev)
3073 dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
3074 rbd_dev->lock_state);
3075 if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED)
3078 rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING;
3079 downgrade_write(&rbd_dev->lock_rwsem);
3081 * Ensure that all in-flight IO is flushed.
3083 * FIXME: ceph_osdc_sync() flushes the entire OSD client, which
3084 * may be shared with other devices.
3086 ceph_osdc_sync(&rbd_dev->rbd_client->client->osdc);
3087 up_read(&rbd_dev->lock_rwsem);
3089 down_write(&rbd_dev->lock_rwsem);
3090 dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
3091 rbd_dev->lock_state);
3092 if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING)
3095 rbd_unlock(rbd_dev);
3097 * Give others a chance to grab the lock - we would re-acquire
3098 * almost immediately if we got new IO during ceph_osdc_sync()
3099 * otherwise. We need to ack our own notifications, so this
3100 * lock_dwork will be requeued from rbd_wait_state_locked()
3101 * after wake_requests() in rbd_handle_released_lock().
3103 cancel_delayed_work(&rbd_dev->lock_dwork);
3107 static void rbd_release_lock_work(struct work_struct *work)
3109 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
3112 down_write(&rbd_dev->lock_rwsem);
3113 rbd_release_lock(rbd_dev);
3114 up_write(&rbd_dev->lock_rwsem);
3117 static void rbd_handle_acquired_lock(struct rbd_device *rbd_dev, u8 struct_v,
3120 struct rbd_client_id cid = { 0 };
3122 if (struct_v >= 2) {
3123 cid.gid = ceph_decode_64(p);
3124 cid.handle = ceph_decode_64(p);
3127 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
3129 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
3130 down_write(&rbd_dev->lock_rwsem);
3131 if (rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
3133 * we already know that the remote client is
* the owner
3136 up_write(&rbd_dev->lock_rwsem);
3140 rbd_set_owner_cid(rbd_dev, &cid);
3141 downgrade_write(&rbd_dev->lock_rwsem);
3143 down_read(&rbd_dev->lock_rwsem);
3146 if (!__rbd_is_lock_owner(rbd_dev))
3147 wake_requests(rbd_dev, false);
3148 up_read(&rbd_dev->lock_rwsem);
3151 static void rbd_handle_released_lock(struct rbd_device *rbd_dev, u8 struct_v,
3154 struct rbd_client_id cid = { 0 };
3156 if (struct_v >= 2) {
3157 cid.gid = ceph_decode_64(p);
3158 cid.handle = ceph_decode_64(p);
3161 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
3163 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
3164 down_write(&rbd_dev->lock_rwsem);
3165 if (!rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
3166 dout("%s rbd_dev %p unexpected owner, cid %llu-%llu != owner_cid %llu-%llu\n",
3167 __func__, rbd_dev, cid.gid, cid.handle,
3168 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle);
3169 up_write(&rbd_dev->lock_rwsem);
3173 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
3174 downgrade_write(&rbd_dev->lock_rwsem);
3176 down_read(&rbd_dev->lock_rwsem);
3179 if (!__rbd_is_lock_owner(rbd_dev))
3180 wake_requests(rbd_dev, false);
3181 up_read(&rbd_dev->lock_rwsem);
3185 * Returns result for ResponseMessage to be encoded (<= 0), or 1 if no
3186 * ResponseMessage is needed.
3188 static int rbd_handle_request_lock(struct rbd_device *rbd_dev, u8 struct_v,
3191 struct rbd_client_id my_cid = rbd_get_cid(rbd_dev);
3192 struct rbd_client_id cid = { 0 };
3195 if (struct_v >= 2) {
3196 cid.gid = ceph_decode_64(p);
3197 cid.handle = ceph_decode_64(p);
3200 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
3202 if (rbd_cid_equal(&cid, &my_cid))
3205 down_read(&rbd_dev->lock_rwsem);
3206 if (__rbd_is_lock_owner(rbd_dev)) {
3207 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED &&
3208 rbd_cid_equal(&rbd_dev->owner_cid, &rbd_empty_cid))
3212 * encode ResponseMessage(0) so the peer can detect
* a missing owner
3217 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) {
3218 if (!rbd_dev->opts->exclusive) {
3219 dout("%s rbd_dev %p queueing unlock_work\n",
3221 queue_work(rbd_dev->task_wq,
3222 &rbd_dev->unlock_work);
3224 /* refuse to release the lock */
3231 up_read(&rbd_dev->lock_rwsem);
3235 static void __rbd_acknowledge_notify(struct rbd_device *rbd_dev,
3236 u64 notify_id, u64 cookie, s32 *result)
3238 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3239 char buf[4 + CEPH_ENCODING_START_BLK_LEN];
3240 int buf_size = sizeof(buf);
3246 /* encode ResponseMessage */
3247 ceph_start_encoding(&p, 1, 1,
3248 buf_size - CEPH_ENCODING_START_BLK_LEN);
3249 ceph_encode_32(&p, *result);
3254 ret = ceph_osdc_notify_ack(osdc, &rbd_dev->header_oid,
3255 &rbd_dev->header_oloc, notify_id, cookie,
3258 rbd_warn(rbd_dev, "acknowledge_notify failed: %d", ret);
3261 static void rbd_acknowledge_notify(struct rbd_device *rbd_dev, u64 notify_id,
3264 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3265 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, NULL);
3268 static void rbd_acknowledge_notify_result(struct rbd_device *rbd_dev,
3269 u64 notify_id, u64 cookie, s32 result)
3271 dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
3272 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, &result);
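/*
 * Watch callback: decode the NotifyMessage and dispatch on the
 * notify op (lock acquired/released/requested or header update),
 * acking the notification in every case.
 */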
3275 static void rbd_watch_cb(void *arg, u64 notify_id, u64 cookie,
3276 u64 notifier_id, void *data, size_t data_len)
3278 struct rbd_device *rbd_dev = arg;
void *p = data;
3280 void *const end = p + data_len;
3286 dout("%s rbd_dev %p cookie %llu notify_id %llu data_len %zu\n",
3287 __func__, rbd_dev, cookie, notify_id, data_len);
3289 ret = ceph_start_decoding(&p, end, 1, "NotifyMessage",
3292 rbd_warn(rbd_dev, "failed to decode NotifyMessage: %d",
3297 notify_op = ceph_decode_32(&p);
3299 /* legacy notification for header updates */
3300 notify_op = RBD_NOTIFY_OP_HEADER_UPDATE;
3304 dout("%s rbd_dev %p notify_op %u\n", __func__, rbd_dev, notify_op);
3305 switch (notify_op) {
3306 case RBD_NOTIFY_OP_ACQUIRED_LOCK:
3307 rbd_handle_acquired_lock(rbd_dev, struct_v, &p);
3308 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3310 case RBD_NOTIFY_OP_RELEASED_LOCK:
3311 rbd_handle_released_lock(rbd_dev, struct_v, &p);
3312 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3314 case RBD_NOTIFY_OP_REQUEST_LOCK:
3315 ret = rbd_handle_request_lock(rbd_dev, struct_v, &p);
3317 rbd_acknowledge_notify_result(rbd_dev, notify_id,
3320 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3322 case RBD_NOTIFY_OP_HEADER_UPDATE:
3323 ret = rbd_dev_refresh(rbd_dev);
3325 rbd_warn(rbd_dev, "refresh failed: %d", ret);
3327 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3330 if (rbd_is_lock_owner(rbd_dev))
3331 rbd_acknowledge_notify_result(rbd_dev, notify_id,
3332 cookie, -EOPNOTSUPP);
3334 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3339 static void __rbd_unregister_watch(struct rbd_device *rbd_dev);
3341 static void rbd_watch_errcb(void *arg, u64 cookie, int err)
3343 struct rbd_device *rbd_dev = arg;
3345 rbd_warn(rbd_dev, "encountered watch error: %d", err);
3347 down_write(&rbd_dev->lock_rwsem);
3348 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
3349 up_write(&rbd_dev->lock_rwsem);
3351 mutex_lock(&rbd_dev->watch_mutex);
3352 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED) {
3353 __rbd_unregister_watch(rbd_dev);
3354 rbd_dev->watch_state = RBD_WATCH_STATE_ERROR;
3356 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->watch_dwork, 0);
3358 mutex_unlock(&rbd_dev->watch_mutex);
3362 * watch_mutex must be locked
3364 static int __rbd_register_watch(struct rbd_device *rbd_dev)
3366 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3367 struct ceph_osd_linger_request *handle;
3369 rbd_assert(!rbd_dev->watch_handle);
3370 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3372 handle = ceph_osdc_watch(osdc, &rbd_dev->header_oid,
3373 &rbd_dev->header_oloc, rbd_watch_cb,
3374 rbd_watch_errcb, rbd_dev);
3376 return PTR_ERR(handle);
3378 rbd_dev->watch_handle = handle;
3383 * watch_mutex must be locked
3385 static void __rbd_unregister_watch(struct rbd_device *rbd_dev)
3387 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3390 rbd_assert(rbd_dev->watch_handle);
3391 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3393 ret = ceph_osdc_unwatch(osdc, rbd_dev->watch_handle);
3395 rbd_warn(rbd_dev, "failed to unwatch: %d", ret);
3397 rbd_dev->watch_handle = NULL;
3400 static int rbd_register_watch(struct rbd_device *rbd_dev)
3404 mutex_lock(&rbd_dev->watch_mutex);
3405 rbd_assert(rbd_dev->watch_state == RBD_WATCH_STATE_UNREGISTERED);
3406 ret = __rbd_register_watch(rbd_dev);
3410 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
3411 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
3414 mutex_unlock(&rbd_dev->watch_mutex);
3418 static void cancel_tasks_sync(struct rbd_device *rbd_dev)
3420 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3422 cancel_work_sync(&rbd_dev->acquired_lock_work);
3423 cancel_work_sync(&rbd_dev->released_lock_work);
3424 cancel_delayed_work_sync(&rbd_dev->lock_dwork);
3425 cancel_work_sync(&rbd_dev->unlock_work);
3428 static void rbd_unregister_watch(struct rbd_device *rbd_dev)
3430 WARN_ON(waitqueue_active(&rbd_dev->lock_waitq));
3431 cancel_tasks_sync(rbd_dev);
3433 mutex_lock(&rbd_dev->watch_mutex);
3434 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED)
3435 __rbd_unregister_watch(rbd_dev);
3436 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
3437 mutex_unlock(&rbd_dev->watch_mutex);
3439 cancel_delayed_work_sync(&rbd_dev->watch_dwork);
3440 ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
3444 * lock_rwsem must be held for write
3446 static void rbd_reacquire_lock(struct rbd_device *rbd_dev)
3448 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3452 WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED);
3454 format_lock_cookie(rbd_dev, cookie);
3455 ret = ceph_cls_set_cookie(osdc, &rbd_dev->header_oid,
3456 &rbd_dev->header_oloc, RBD_LOCK_NAME,
3457 CEPH_CLS_LOCK_EXCLUSIVE, rbd_dev->lock_cookie,
3458 RBD_LOCK_TAG, cookie);
3460 if (ret != -EOPNOTSUPP)
3461 rbd_warn(rbd_dev, "failed to update lock cookie: %d",
3465 * Lock cookie cannot be updated on older OSDs, so do
3466 * a manual release and queue an acquire.
3468 if (rbd_release_lock(rbd_dev))
3469 queue_delayed_work(rbd_dev->task_wq,
3470 &rbd_dev->lock_dwork, 0);
3472 __rbd_lock(rbd_dev, cookie);
3476 static void rbd_reregister_watch(struct work_struct *work)
3478 struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
3479 struct rbd_device, watch_dwork);
3482 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3484 mutex_lock(&rbd_dev->watch_mutex);
3485 if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR) {
3486 mutex_unlock(&rbd_dev->watch_mutex);
3490 ret = __rbd_register_watch(rbd_dev);
3492 rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
3493 if (ret == -EBLACKLISTED || ret == -ENOENT) {
3494 set_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags);
3495 wake_requests(rbd_dev, true);
3497 queue_delayed_work(rbd_dev->task_wq,
3498 &rbd_dev->watch_dwork,
3501 mutex_unlock(&rbd_dev->watch_mutex);
3505 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
3506 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
3507 mutex_unlock(&rbd_dev->watch_mutex);
3509 down_write(&rbd_dev->lock_rwsem);
3510 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED)
3511 rbd_reacquire_lock(rbd_dev);
3512 up_write(&rbd_dev->lock_rwsem);
3514 ret = rbd_dev_refresh(rbd_dev);
3516 rbd_warn(rbd_dev, "reregistration refresh failed: %d", ret);
3520 * Synchronous osd object method call. Returns the number of bytes
3521 * returned in the inbound buffer, or a negative error code.
3523 static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
3524 struct ceph_object_id *oid,
3525 struct ceph_object_locator *oloc,
3526 const char *method_name,
3527 const void *outbound,
3528 size_t outbound_size,
3530 size_t inbound_size)
3532 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3533 struct page *req_page = NULL;
3534 struct page *reply_page;
3538 * Method calls are ultimately read operations. The result
3539 * should be placed into the inbound buffer provided. They
3540 * also supply outbound data--parameters for the object
3541 * method. Currently if this is present it will be a
* snapshot id.
3545 if (outbound_size > PAGE_SIZE)
3548 req_page = alloc_page(GFP_KERNEL);
3552 memcpy(page_address(req_page), outbound, outbound_size);
3555 reply_page = alloc_page(GFP_KERNEL);
3558 __free_page(req_page);
3562 ret = ceph_osdc_call(osdc, oid, oloc, RBD_DRV_NAME, method_name,
3563 CEPH_OSD_FLAG_READ, req_page, outbound_size,
3564 reply_page, &inbound_size);
3566 memcpy(inbound, page_address(reply_page), inbound_size);
3571 __free_page(req_page);
3572 __free_page(reply_page);
3577 * lock_rwsem must be held for read
3579 static int rbd_wait_state_locked(struct rbd_device *rbd_dev, bool may_acquire)
3582 unsigned long timeout;
3585 if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags))
3586 return -EBLACKLISTED;
3588 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED)
3592 rbd_warn(rbd_dev, "exclusive lock required");
3598 * Note the use of mod_delayed_work() in rbd_acquire_lock()
3599 * and cancel_delayed_work() in wake_requests().
3601 dout("%s rbd_dev %p queueing lock_dwork\n", __func__, rbd_dev);
3602 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
3603 prepare_to_wait_exclusive(&rbd_dev->lock_waitq, &wait,
3604 TASK_UNINTERRUPTIBLE);
3605 up_read(&rbd_dev->lock_rwsem);
3606 timeout = schedule_timeout(ceph_timeout_jiffies(
3607 rbd_dev->opts->lock_timeout));
3608 down_read(&rbd_dev->lock_rwsem);
3609 if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) {
3610 ret = -EBLACKLISTED;
3614 rbd_warn(rbd_dev, "timed out waiting for lock");
3618 } while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED);
3620 finish_wait(&rbd_dev->lock_waitq, &wait);
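/*
 * Per-request work function: translate a block layer request into
 * an image request -- taking the exclusive lock first if required --
 * and submit it.
 */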
3624 static void rbd_queue_workfn(struct work_struct *work)
3626 struct request *rq = blk_mq_rq_from_pdu(work);
3627 struct rbd_device *rbd_dev = rq->q->queuedata;
3628 struct rbd_img_request *img_request;
3629 struct ceph_snap_context *snapc = NULL;
3630 u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
3631 u64 length = blk_rq_bytes(rq);
3632 enum obj_operation_type op_type;
3634 bool must_be_locked;
3637 switch (req_op(rq)) {
3638 case REQ_OP_DISCARD:
3639 case REQ_OP_WRITE_ZEROES:
3640 op_type = OBJ_OP_DISCARD;
break;
case REQ_OP_WRITE:
3643 op_type = OBJ_OP_WRITE;
break;
case REQ_OP_READ:
3646 op_type = OBJ_OP_READ;
break;
default:
3649 dout("%s: non-fs request type %d\n", __func__, req_op(rq));
3654 /* Ignore/skip any zero-length requests */
3657 dout("%s: zero-length request\n", __func__);
3662 rbd_assert(op_type == OBJ_OP_READ ||
3663 rbd_dev->spec->snap_id == CEPH_NOSNAP);
3666 * Quit early if the mapped snapshot no longer exists. It's
3667 * still possible the snapshot will have disappeared by the
3668 * time our request arrives at the osd, but there's no sense in
3669 * sending it if we already know.
3671 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
3672 dout("request for non-existent snapshot");
3673 rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
3678 if (offset && length > U64_MAX - offset + 1) {
3679 rbd_warn(rbd_dev, "bad request range (%llu~%llu)", offset,
3682 goto err_rq; /* Shouldn't happen */
3685 blk_mq_start_request(rq);
3687 down_read(&rbd_dev->header_rwsem);
3688 mapping_size = rbd_dev->mapping.size;
3689 if (op_type != OBJ_OP_READ) {
3690 snapc = rbd_dev->header.snapc;
3691 ceph_get_snap_context(snapc);
3693 up_read(&rbd_dev->header_rwsem);
3695 if (offset + length > mapping_size) {
3696 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
3697 length, mapping_size);
must_be_locked =
3703 (rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK) &&
3704 (op_type != OBJ_OP_READ || rbd_dev->opts->lock_on_read);
3705 if (must_be_locked) {
3706 down_read(&rbd_dev->lock_rwsem);
3707 result = rbd_wait_state_locked(rbd_dev,
3708 !rbd_dev->opts->exclusive);
3713 img_request = rbd_img_request_create(rbd_dev, op_type, snapc);
3718 img_request->rq = rq;
3719 snapc = NULL; /* img_request consumes a ref */
3721 if (op_type == OBJ_OP_DISCARD)
3722 result = rbd_img_fill_nodata(img_request, offset, length);
3724 result = rbd_img_fill_from_bio(img_request, offset, length,
3727 goto err_img_request;
3729 rbd_img_request_submit(img_request);
3731 up_read(&rbd_dev->lock_rwsem);
3735 rbd_img_request_put(img_request);
3738 up_read(&rbd_dev->lock_rwsem);
3741 rbd_warn(rbd_dev, "%s %llx at %llx result %d",
3742 obj_op_name(op_type), length, offset, result);
3743 ceph_put_snap_context(snapc);
3745 blk_mq_end_request(rq, errno_to_blk_status(result));
3748 static blk_status_t rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
3749 const struct blk_mq_queue_data *bd)
3751 struct request *rq = bd->rq;
3752 struct work_struct *work = blk_mq_rq_to_pdu(rq);
3754 queue_work(rbd_wq, work);
3758 static void rbd_free_disk(struct rbd_device *rbd_dev)
3760 blk_cleanup_queue(rbd_dev->disk->queue);
3761 blk_mq_free_tag_set(&rbd_dev->tag_set);
3762 put_disk(rbd_dev->disk);
3763 rbd_dev->disk = NULL;
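/*
 * Synchronously read up to @buf_len bytes from the given object
 * into @buf. Returns the number of bytes read, or a negative error
 * code.
 */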
3766 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
3767 struct ceph_object_id *oid,
3768 struct ceph_object_locator *oloc,
3769 void *buf, int buf_len)
3772 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3773 struct ceph_osd_request *req;
3774 struct page **pages;
3775 int num_pages = calc_pages_for(0, buf_len);
3778 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
3782 ceph_oid_copy(&req->r_base_oid, oid);
3783 ceph_oloc_copy(&req->r_base_oloc, oloc);
3784 req->r_flags = CEPH_OSD_FLAG_READ;
3786 ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
3790 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
3791 if (IS_ERR(pages)) {
3792 ret = PTR_ERR(pages);
3796 osd_req_op_extent_init(req, 0, CEPH_OSD_OP_READ, 0, buf_len, 0, 0);
3797 osd_req_op_extent_osd_data_pages(req, 0, pages, buf_len, 0, false,
3800 ceph_osdc_start_request(osdc, req, false);
3801 ret = ceph_osdc_wait_request(osdc, req);
3803 ceph_copy_from_page_vector(pages, buf, 0, ret);
3806 ceph_osdc_put_request(req);
3811 * Read the complete header for the given rbd device. On successful
3812 * return, the rbd_dev->header field will contain up-to-date
3813 * information about the image.
3815 static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
3817 struct rbd_image_header_ondisk *ondisk = NULL;
3824 * The complete header will include an array of its 64-bit
3825 * snapshot ids, followed by the names of those snapshots as
3826 * a contiguous block of NUL-terminated strings. Note that
3827 * the number of snapshots could change by the time we read
3828 * it in, in which case we re-read it.
3835 size = sizeof (*ondisk);
3836 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
3838 ondisk = kmalloc(size, GFP_KERNEL);
3842 ret = rbd_obj_read_sync(rbd_dev, &rbd_dev->header_oid,
3843 &rbd_dev->header_oloc, ondisk, size);
3846 if ((size_t)ret < size) {
3848 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
3852 if (!rbd_dev_ondisk_valid(ondisk)) {
3854 rbd_warn(rbd_dev, "invalid header");
3858 names_size = le64_to_cpu(ondisk->snap_names_len);
3859 want_count = snap_count;
3860 snap_count = le32_to_cpu(ondisk->snap_count);
3861 } while (snap_count != want_count);
3863 ret = rbd_header_from_disk(rbd_dev, ondisk);
3871 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
3872 * has disappeared from the (just updated) snapshot context.
3874 static void rbd_exists_validate(struct rbd_device *rbd_dev)
3878 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
3881 snap_id = rbd_dev->spec->snap_id;
3882 if (snap_id == CEPH_NOSNAP)
3885 if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
3886 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
3889 static void rbd_dev_update_size(struct rbd_device *rbd_dev)
3894 * If EXISTS is not set, rbd_dev->disk may be NULL, so don't
3895 * try to update its size. If REMOVING is set, updating size
3896 * is just useless work since the device can't be opened.
3898 if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) &&
3899 !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
3900 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
3901 dout("setting size to %llu sectors", (unsigned long long)size);
3902 set_capacity(rbd_dev->disk, size);
3903 revalidate_disk(rbd_dev->disk);
3907 static int rbd_dev_refresh(struct rbd_device *rbd_dev)
3912 down_write(&rbd_dev->header_rwsem);
3913 mapping_size = rbd_dev->mapping.size;
3915 ret = rbd_dev_header_info(rbd_dev);
3920 * If there is a parent, see if it has disappeared due to the
3921 * mapped image getting flattened.
3923 if (rbd_dev->parent) {
3924 ret = rbd_dev_v2_parent_info(rbd_dev);
3929 if (rbd_dev->spec->snap_id == CEPH_NOSNAP) {
3930 rbd_dev->mapping.size = rbd_dev->header.image_size;
3932 /* validate mapped snapshot's EXISTS flag */
3933 rbd_exists_validate(rbd_dev);
3937 up_write(&rbd_dev->header_rwsem);
3938 if (!ret && mapping_size != rbd_dev->mapping.size)
3939 rbd_dev_update_size(rbd_dev);
3944 static int rbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
3945 unsigned int hctx_idx, unsigned int numa_node)
3947 struct work_struct *work = blk_mq_rq_to_pdu(rq);
3949 INIT_WORK(work, rbd_queue_workfn);
3953 static const struct blk_mq_ops rbd_mq_ops = {
3954 .queue_rq = rbd_queue_rq,
3955 .init_request = rbd_init_request,
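/*
 * Set up the gendisk and blk-mq queue for this mapping. Queue
 * limits are derived from the object set size (object size times
 * stripe count).
 */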
3958 static int rbd_init_disk(struct rbd_device *rbd_dev)
3960 struct gendisk *disk;
3961 struct request_queue *q;
3962 unsigned int objset_bytes =
3963 rbd_dev->layout.object_size * rbd_dev->layout.stripe_count;
3966 /* create gendisk info */
3967 disk = alloc_disk(single_major ?
3968 (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
3969 RBD_MINORS_PER_MAJOR);
3973 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
3975 disk->major = rbd_dev->major;
3976 disk->first_minor = rbd_dev->minor;
3978 disk->flags |= GENHD_FL_EXT_DEVT;
3979 disk->fops = &rbd_bd_ops;
3980 disk->private_data = rbd_dev;
3982 memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
3983 rbd_dev->tag_set.ops = &rbd_mq_ops;
3984 rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
3985 rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
3986 rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
3987 rbd_dev->tag_set.nr_hw_queues = 1;
3988 rbd_dev->tag_set.cmd_size = sizeof(struct work_struct);
3990 err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
3994 q = blk_mq_init_queue(&rbd_dev->tag_set);
4000 blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
4001 /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
4003 blk_queue_max_hw_sectors(q, objset_bytes >> SECTOR_SHIFT);
4004 q->limits.max_sectors = queue_max_hw_sectors(q);
4005 blk_queue_max_segments(q, USHRT_MAX);
4006 blk_queue_max_segment_size(q, UINT_MAX);
4007 blk_queue_io_min(q, objset_bytes);
4008 blk_queue_io_opt(q, objset_bytes);
4010 if (rbd_dev->opts->trim) {
4011 blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
4012 q->limits.discard_granularity = objset_bytes;
4013 blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT);
4014 blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT);
4017 if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
4018 q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
4021 * disk_release() expects a queue ref from add_disk() and will
4022 * put it. Hold an extra ref until add_disk() is called.
4024 WARN_ON(!blk_get_queue(q));
4026 q->queuedata = rbd_dev;
4028 rbd_dev->disk = disk;
4032 blk_mq_free_tag_set(&rbd_dev->tag_set);
4042 static struct rbd_device *dev_to_rbd_dev(struct device *dev)
4044 return container_of(dev, struct rbd_device, dev);
4047 static ssize_t rbd_size_show(struct device *dev,
4048 struct device_attribute *attr, char *buf)
4050 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4052 return sprintf(buf, "%llu\n",
4053 (unsigned long long)rbd_dev->mapping.size);
4057 * Note this shows the features for whatever's mapped, which is not
4058 * necessarily the base image.
4060 static ssize_t rbd_features_show(struct device *dev,
4061 struct device_attribute *attr, char *buf)
4063 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4065 return sprintf(buf, "0x%016llx\n",
4066 (unsigned long long)rbd_dev->mapping.features);
4069 static ssize_t rbd_major_show(struct device *dev,
4070 struct device_attribute *attr, char *buf)
4072 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4075 return sprintf(buf, "%d\n", rbd_dev->major);
4077 return sprintf(buf, "(none)\n");
4080 static ssize_t rbd_minor_show(struct device *dev,
4081 struct device_attribute *attr, char *buf)
4083 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4085 return sprintf(buf, "%d\n", rbd_dev->minor);
4088 static ssize_t rbd_client_addr_show(struct device *dev,
4089 struct device_attribute *attr, char *buf)
4091 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4092 struct ceph_entity_addr *client_addr =
4093 ceph_client_addr(rbd_dev->rbd_client->client);
4095 return sprintf(buf, "%pISpc/%u\n", &client_addr->in_addr,
4096 le32_to_cpu(client_addr->nonce));
4099 static ssize_t rbd_client_id_show(struct device *dev,
4100 struct device_attribute *attr, char *buf)
4102 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4104 return sprintf(buf, "client%lld\n",
4105 ceph_client_gid(rbd_dev->rbd_client->client));
4108 static ssize_t rbd_cluster_fsid_show(struct device *dev,
4109 struct device_attribute *attr, char *buf)
4111 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4113 return sprintf(buf, "%pU\n", &rbd_dev->rbd_client->client->fsid);
4116 static ssize_t rbd_config_info_show(struct device *dev,
4117 struct device_attribute *attr, char *buf)
4119 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4121 return sprintf(buf, "%s\n", rbd_dev->config_info);
4124 static ssize_t rbd_pool_show(struct device *dev,
4125 struct device_attribute *attr, char *buf)
4127 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4129 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
4132 static ssize_t rbd_pool_id_show(struct device *dev,
4133 struct device_attribute *attr, char *buf)
4135 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4137 return sprintf(buf, "%llu\n",
4138 (unsigned long long) rbd_dev->spec->pool_id);
4141 static ssize_t rbd_pool_ns_show(struct device *dev,
4142 struct device_attribute *attr, char *buf)
4144 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4146 return sprintf(buf, "%s\n", rbd_dev->spec->pool_ns ?: "");
4149 static ssize_t rbd_name_show(struct device *dev,
4150 struct device_attribute *attr, char *buf)
4152 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4154 if (rbd_dev->spec->image_name)
4155 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
4157 return sprintf(buf, "(unknown)\n");
4160 static ssize_t rbd_image_id_show(struct device *dev,
4161 struct device_attribute *attr, char *buf)
4163 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4165 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
4169 * Shows the name of the currently-mapped snapshot (or
4170 * RBD_SNAP_HEAD_NAME for the base image).
4172 static ssize_t rbd_snap_show(struct device *dev,
4173 struct device_attribute *attr,
4176 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4178 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
4181 static ssize_t rbd_snap_id_show(struct device *dev,
4182 struct device_attribute *attr, char *buf)
4184 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4186 return sprintf(buf, "%llu\n", rbd_dev->spec->snap_id);
4190 * For a v2 image, shows the chain of parent images, separated by empty
4191 * lines. For v1 images or if there is no parent, shows "(no parent
4194 static ssize_t rbd_parent_show(struct device *dev,
4195 struct device_attribute *attr,
4198 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4201 if (!rbd_dev->parent)
4202 return sprintf(buf, "(no parent image)\n");
4204 for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
4205 struct rbd_spec *spec = rbd_dev->parent_spec;
4207 count += sprintf(&buf[count], "%s"
4208 "pool_id %llu\npool_name %s\n"
4210 "image_id %s\nimage_name %s\n"
4211 "snap_id %llu\nsnap_name %s\n"
4213 !count ? "" : "\n", /* first? */
4214 spec->pool_id, spec->pool_name,
4215 spec->pool_ns ?: "",
4216 spec->image_id, spec->image_name ?: "(unknown)",
4217 spec->snap_id, spec->snap_name,
4218 rbd_dev->parent_overlap);
4224 static ssize_t rbd_image_refresh(struct device *dev,
4225 struct device_attribute *attr,
4229 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4232 ret = rbd_dev_refresh(rbd_dev);
4239 static DEVICE_ATTR(size, 0444, rbd_size_show, NULL);
4240 static DEVICE_ATTR(features, 0444, rbd_features_show, NULL);
4241 static DEVICE_ATTR(major, 0444, rbd_major_show, NULL);
4242 static DEVICE_ATTR(minor, 0444, rbd_minor_show, NULL);
4243 static DEVICE_ATTR(client_addr, 0444, rbd_client_addr_show, NULL);
4244 static DEVICE_ATTR(client_id, 0444, rbd_client_id_show, NULL);
4245 static DEVICE_ATTR(cluster_fsid, 0444, rbd_cluster_fsid_show, NULL);
4246 static DEVICE_ATTR(config_info, 0400, rbd_config_info_show, NULL);
4247 static DEVICE_ATTR(pool, 0444, rbd_pool_show, NULL);
4248 static DEVICE_ATTR(pool_id, 0444, rbd_pool_id_show, NULL);
4249 static DEVICE_ATTR(pool_ns, 0444, rbd_pool_ns_show, NULL);
4250 static DEVICE_ATTR(name, 0444, rbd_name_show, NULL);
4251 static DEVICE_ATTR(image_id, 0444, rbd_image_id_show, NULL);
4252 static DEVICE_ATTR(refresh, 0200, NULL, rbd_image_refresh);
4253 static DEVICE_ATTR(current_snap, 0444, rbd_snap_show, NULL);
4254 static DEVICE_ATTR(snap_id, 0444, rbd_snap_id_show, NULL);
4255 static DEVICE_ATTR(parent, 0444, rbd_parent_show, NULL);
4257 static struct attribute *rbd_attrs[] = {
4258 &dev_attr_size.attr,
4259 &dev_attr_features.attr,
4260 &dev_attr_major.attr,
4261 &dev_attr_minor.attr,
4262 &dev_attr_client_addr.attr,
4263 &dev_attr_client_id.attr,
4264 &dev_attr_cluster_fsid.attr,
4265 &dev_attr_config_info.attr,
4266 &dev_attr_pool.attr,
4267 &dev_attr_pool_id.attr,
4268 &dev_attr_pool_ns.attr,
4269 &dev_attr_name.attr,
4270 &dev_attr_image_id.attr,
4271 &dev_attr_current_snap.attr,
4272 &dev_attr_snap_id.attr,
4273 &dev_attr_parent.attr,
4274 &dev_attr_refresh.attr,
4278 static struct attribute_group rbd_attr_group = {
4282 static const struct attribute_group *rbd_attr_groups[] = {
4287 static void rbd_dev_release(struct device *dev);
4289 static const struct device_type rbd_device_type = {
4291 .groups = rbd_attr_groups,
4292 .release = rbd_dev_release,
4295 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
4297 kref_get(&spec->kref);
4302 static void rbd_spec_free(struct kref *kref);
4303 static void rbd_spec_put(struct rbd_spec *spec)
4306 kref_put(&spec->kref, rbd_spec_free);
4309 static struct rbd_spec *rbd_spec_alloc(void)
4311 struct rbd_spec *spec;
4313 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
4317 spec->pool_id = CEPH_NOPOOL;
4318 spec->snap_id = CEPH_NOSNAP;
4319 kref_init(&spec->kref);
4324 static void rbd_spec_free(struct kref *kref)
4326 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
4328 kfree(spec->pool_name);
4329 kfree(spec->pool_ns);
4330 kfree(spec->image_id);
4331 kfree(spec->image_name);
4332 kfree(spec->snap_name);
4336 static void rbd_dev_free(struct rbd_device *rbd_dev)
4338 WARN_ON(rbd_dev->watch_state != RBD_WATCH_STATE_UNREGISTERED);
4339 WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_UNLOCKED);
4341 ceph_oid_destroy(&rbd_dev->header_oid);
4342 ceph_oloc_destroy(&rbd_dev->header_oloc);
4343 kfree(rbd_dev->config_info);
4345 rbd_put_client(rbd_dev->rbd_client);
4346 rbd_spec_put(rbd_dev->spec);
4347 kfree(rbd_dev->opts);
4351 static void rbd_dev_release(struct device *dev)
4353 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4354 bool need_put = !!rbd_dev->opts;
4357 destroy_workqueue(rbd_dev->task_wq);
4358 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
4361 rbd_dev_free(rbd_dev);
4364 * This is racy, but way better than putting the module_put() outside of
4365 * the release callback. The race window is pretty small, so
4366 * doing something similar to dm (dm-builtin.c) is overkill.
4369 module_put(THIS_MODULE);
4372 static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc,
4373 struct rbd_spec *spec)
4375 struct rbd_device *rbd_dev;
4377 rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL);
4381 spin_lock_init(&rbd_dev->lock);
4382 INIT_LIST_HEAD(&rbd_dev->node);
4383 init_rwsem(&rbd_dev->header_rwsem);
4385 rbd_dev->header.data_pool_id = CEPH_NOPOOL;
4386 ceph_oid_init(&rbd_dev->header_oid);
4387 rbd_dev->header_oloc.pool = spec->pool_id;
4388 if (spec->pool_ns) {
4389 WARN_ON(!*spec->pool_ns);
4390 rbd_dev->header_oloc.pool_ns =
4391 ceph_find_or_create_string(spec->pool_ns,
4392 strlen(spec->pool_ns));
4395 mutex_init(&rbd_dev->watch_mutex);
4396 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
4397 INIT_DELAYED_WORK(&rbd_dev->watch_dwork, rbd_reregister_watch);
4399 init_rwsem(&rbd_dev->lock_rwsem);
4400 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
4401 INIT_WORK(&rbd_dev->acquired_lock_work, rbd_notify_acquired_lock);
4402 INIT_WORK(&rbd_dev->released_lock_work, rbd_notify_released_lock);
4403 INIT_DELAYED_WORK(&rbd_dev->lock_dwork, rbd_acquire_lock);
4404 INIT_WORK(&rbd_dev->unlock_work, rbd_release_lock_work);
4405 init_waitqueue_head(&rbd_dev->lock_waitq);
4407 rbd_dev->dev.bus = &rbd_bus_type;
4408 rbd_dev->dev.type = &rbd_device_type;
4409 rbd_dev->dev.parent = &rbd_root_dev;
4410 device_initialize(&rbd_dev->dev);
4412 rbd_dev->rbd_client = rbdc;
4413 rbd_dev->spec = spec;
4419 * Create a mapping rbd_dev.
4421 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
4422 struct rbd_spec *spec,
4423 struct rbd_options *opts)
4425 struct rbd_device *rbd_dev;
4427 rbd_dev = __rbd_dev_create(rbdc, spec);
4431 rbd_dev->opts = opts;
4433 /* get an id and fill in device name */
4434 rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0,
4435 minor_to_rbd_dev_id(1 << MINORBITS),
4437 if (rbd_dev->dev_id < 0)
4440 sprintf(rbd_dev->name, RBD_DRV_NAME "%d", rbd_dev->dev_id);
4441 rbd_dev->task_wq = alloc_ordered_workqueue("%s-tasks", WQ_MEM_RECLAIM,
4443 if (!rbd_dev->task_wq)
4446 /* we have a ref from do_rbd_add() */
4447 __module_get(THIS_MODULE);
4449 dout("%s rbd_dev %p dev_id %d\n", __func__, rbd_dev, rbd_dev->dev_id);
4453 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
4455 rbd_dev_free(rbd_dev);
4459 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
4462 put_device(&rbd_dev->dev);
4466 * Get the size and object order for an image snapshot, or if
4467 * snap_id is CEPH_NOSNAP, get this information for the base
* image.
4470 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
4471 u8 *order, u64 *snap_size)
4473 __le64 snapid = cpu_to_le64(snap_id);
4478 } __attribute__ ((packed)) size_buf = { 0 };
4480 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
4481 &rbd_dev->header_oloc, "get_size",
4482 &snapid, sizeof(snapid),
4483 &size_buf, sizeof(size_buf));
4484 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4487 if (ret < sizeof (size_buf))
4491 *order = size_buf.order;
4492 dout(" order %u", (unsigned int)*order);
4494 *snap_size = le64_to_cpu(size_buf.size);
4496 dout(" snap_id 0x%016llx snap_size = %llu\n",
4497 (unsigned long long)snap_id,
4498 (unsigned long long)*snap_size);
4503 static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
4505 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
4506 &rbd_dev->header.obj_order,
4507 &rbd_dev->header.image_size);
4510 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
4516 reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
4520 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
4521 &rbd_dev->header_oloc, "get_object_prefix",
4522 NULL, 0, reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
4523 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4528 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
4529 p + ret, NULL, GFP_NOIO);
4532 if (IS_ERR(rbd_dev->header.object_prefix)) {
4533 ret = PTR_ERR(rbd_dev->header.object_prefix);
4534 rbd_dev->header.object_prefix = NULL;
4536 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
4544 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
4547 __le64 snapid = cpu_to_le64(snap_id);
4551 } __attribute__ ((packed)) features_buf = { 0 };
4555 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
4556 &rbd_dev->header_oloc, "get_features",
4557 &snapid, sizeof(snapid),
4558 &features_buf, sizeof(features_buf));
4559 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4562 if (ret < sizeof (features_buf))
4565 unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED;
4567 rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
4572 *snap_features = le64_to_cpu(features_buf.features);
4574 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
4575 (unsigned long long)snap_id,
4576 (unsigned long long)*snap_features,
4577 (unsigned long long)le64_to_cpu(features_buf.incompat));
4582 static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
4584 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
4585 &rbd_dev->header.features);
4588 struct parent_image_info {
4590 const char *pool_ns;
4591 const char *image_id;
4599 * The caller is responsible for @pii.
4601 static int decode_parent_image_spec(void **p, void *end,
4602 struct parent_image_info *pii)
4608 ret = ceph_start_decoding(p, end, 1, "ParentImageSpec",
4609 &struct_v, &struct_len);
4613 ceph_decode_64_safe(p, end, pii->pool_id, e_inval);
4614 pii->pool_ns = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
4615 if (IS_ERR(pii->pool_ns)) {
4616 ret = PTR_ERR(pii->pool_ns);
4617 pii->pool_ns = NULL;
4620 pii->image_id = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
4621 if (IS_ERR(pii->image_id)) {
4622 ret = PTR_ERR(pii->image_id);
4623 pii->image_id = NULL;
4626 ceph_decode_64_safe(p, end, pii->snap_id, e_inval);
4633 static int __get_parent_info(struct rbd_device *rbd_dev,
4634 struct page *req_page,
4635 struct page *reply_page,
4636 struct parent_image_info *pii)
4638 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4639 size_t reply_len = PAGE_SIZE;
4643 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
4644 "rbd", "parent_get", CEPH_OSD_FLAG_READ,
4645 req_page, sizeof(u64), reply_page, &reply_len);
4647 return ret == -EOPNOTSUPP ? 1 : ret;
4649 p = page_address(reply_page);
4650 end = p + reply_len;
4651 ret = decode_parent_image_spec(&p, end, pii);
4655 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
4656 "rbd", "parent_overlap_get", CEPH_OSD_FLAG_READ,
4657 req_page, sizeof(u64), reply_page, &reply_len);
4661 p = page_address(reply_page);
4662 end = p + reply_len;
4663 ceph_decode_8_safe(&p, end, pii->has_overlap, e_inval);
4664 if (pii->has_overlap)
4665 ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
4674 * The caller is responsible for @pii.
4676 static int __get_parent_info_legacy(struct rbd_device *rbd_dev,
4677 struct page *req_page,
4678 struct page *reply_page,
4679 struct parent_image_info *pii)
4681 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4682 size_t reply_len = PAGE_SIZE;
4686 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
4687 "rbd", "get_parent", CEPH_OSD_FLAG_READ,
4688 req_page, sizeof(u64), reply_page, &reply_len);
4692 p = page_address(reply_page);
4693 end = p + reply_len;
4694 ceph_decode_64_safe(&p, end, pii->pool_id, e_inval);
4695 pii->image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
4696 if (IS_ERR(pii->image_id)) {
4697 ret = PTR_ERR(pii->image_id);
4698 pii->image_id = NULL;
4701 ceph_decode_64_safe(&p, end, pii->snap_id, e_inval);
4702 pii->has_overlap = true;
4703 ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
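/*
 * Retrieve parent image info, preferring the "parent_get" class
 * method and falling back to the legacy "get_parent" method on
 * older OSDs.
 */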
4711 static int get_parent_info(struct rbd_device *rbd_dev,
4712 struct parent_image_info *pii)
4714 struct page *req_page, *reply_page;
4718 req_page = alloc_page(GFP_KERNEL);
4722 reply_page = alloc_page(GFP_KERNEL);
4724 __free_page(req_page);
4728 p = page_address(req_page);
4729 ceph_encode_64(&p, rbd_dev->spec->snap_id);
4730 ret = __get_parent_info(rbd_dev, req_page, reply_page, pii);
4732 ret = __get_parent_info_legacy(rbd_dev, req_page, reply_page,
4735 __free_page(req_page);
4736 __free_page(reply_page);
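/*
 * Fetch and record the parent spec and overlap for a v2 image,
 * handling the case where the image has been flattened since the
 * last refresh.
 */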
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
{
	struct rbd_spec *parent_spec;
	struct parent_image_info pii = { 0 };
	int ret;

	parent_spec = rbd_spec_alloc();
	if (!parent_spec)
		return -ENOMEM;

	ret = get_parent_info(rbd_dev, &pii);
	if (ret)
		goto out_err;

	dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
	     __func__, pii.pool_id, pii.pool_ns, pii.image_id, pii.snap_id,
	     pii.has_overlap, pii.overlap);

	if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap) {
		/*
		 * Either the parent never existed, or we have a
		 * record of it but the image got flattened so it no
		 * longer has a parent.  When the parent of a
		 * layered image disappears we immediately set the
		 * overlap to 0.  The effect of this is that all new
		 * requests will be treated as if the image had no
		 * parent.
		 *
		 * If !pii.has_overlap, the parent image spec is not
		 * applicable.  It's there to avoid duplication in each
		 * snapshot record.
		 */
		if (rbd_dev->parent_overlap) {
			rbd_dev->parent_overlap = 0;
			rbd_dev_parent_put(rbd_dev);
			pr_info("%s: clone image has been flattened\n",
				rbd_dev->disk->disk_name);
		}

		goto out;	/* No parent?  No problem. */
	}

	/* The ceph file layout needs to fit pool id in 32 bits */

	ret = -EIO;
	if (pii.pool_id > (u64)U32_MAX) {
		rbd_warn(NULL, "parent pool id too large (%llu > %u)",
			 (unsigned long long)pii.pool_id, U32_MAX);
		goto out_err;
	}

	/*
	 * The parent won't change (except when the clone is
	 * flattened, which was handled above).  So we only need to
	 * record the parent spec if we have not already done so.
	 */
	if (!rbd_dev->parent_spec) {
		parent_spec->pool_id = pii.pool_id;
		if (pii.pool_ns && *pii.pool_ns) {
			parent_spec->pool_ns = pii.pool_ns;
			pii.pool_ns = NULL;
		}
		parent_spec->image_id = pii.image_id;
		pii.image_id = NULL;
		parent_spec->snap_id = pii.snap_id;

		rbd_dev->parent_spec = parent_spec;
		parent_spec = NULL;	/* rbd_dev now owns this */
	}

	/*
	 * We always update the parent overlap.  If it's zero we issue
	 * a warning, as we will proceed as if there was no parent.
	 */
	if (!pii.overlap) {
		/* refresh, careful to warn just once */
		if (rbd_dev->parent_overlap)
			rbd_warn(rbd_dev,
			    "clone now standalone (overlap became 0)");
		else
			/* initial probe */
			rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
	}
	rbd_dev->parent_overlap = pii.overlap;

out:
	ret = 0;
out_err:
	kfree(pii.pool_ns);
	kfree(pii.image_id);
	rbd_spec_put(parent_spec);
	return ret;
}

static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
{
	struct {
		__le64 stripe_unit;
		__le64 stripe_count;
	} __attribute__ ((packed)) striping_info_buf = { 0 };
	size_t size = sizeof (striping_info_buf);
	void *p;
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
				&rbd_dev->header_oloc, "get_stripe_unit_count",
				NULL, 0, &striping_info_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < size)
		return -ERANGE;

	p = &striping_info_buf;
	rbd_dev->header.stripe_unit = ceph_decode_64(&p);
	rbd_dev->header.stripe_count = ceph_decode_64(&p);
	return 0;
}

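/*
 * A sketch of how these two values shape the data layout (illustrative
 * only -- the real mapping is done by the ceph striper, and the local
 * variable names below are made up for this comment):
 *
 *	u64 su = rbd_dev->header.stripe_unit;
 *	u64 sc = rbd_dev->header.stripe_count;
 *	u64 blockno = off / su;		which stripe unit off falls in
 *	u64 stripepos = blockno % sc;	which object within the stripe
 *
 * i.e. consecutive stripe units are distributed round-robin across
 * stripe_count objects.  Without RBD_FEATURE_STRIPINGV2 the values
 * default to stripe_unit == object size and stripe_count == 1, which
 * reduces to plain one-object-after-another striping.
 */
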
static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev)
{
	__le64 data_pool_id;
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
				  &rbd_dev->header_oloc, "get_data_pool",
				  NULL, 0, &data_pool_id, sizeof(data_pool_id));
	if (ret < 0)
		return ret;
	if (ret < sizeof(data_pool_id))
		return -EBADMSG;

	rbd_dev->header.data_pool_id = le64_to_cpu(data_pool_id);
	WARN_ON(rbd_dev->header.data_pool_id == CEPH_NOPOOL);
	return 0;
}

static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
{
	CEPH_DEFINE_OID_ONSTACK(oid);
	size_t image_id_size;
	char *image_id;
	void *p;
	void *end;
	size_t size;
	void *reply_buf = NULL;
	size_t len = 0;
	char *image_name = NULL;
	int ret;

	rbd_assert(!rbd_dev->spec->image_name);

	len = strlen(rbd_dev->spec->image_id);
	image_id_size = sizeof (__le32) + len;
	image_id = kmalloc(image_id_size, GFP_KERNEL);
	if (!image_id)
		return NULL;

	p = image_id;
	end = image_id + image_id_size;
	ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);

	size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf)
		goto out;

	ceph_oid_printf(&oid, "%s", RBD_DIRECTORY);
	ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
				  "dir_get_name", image_id, image_id_size,
				  reply_buf, size);
	if (ret < 0)
		goto out;
	p = reply_buf;
	end = reply_buf + ret;

	image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
	if (IS_ERR(image_name))
		image_name = NULL;
	else
		dout("%s: name is %s len is %zd\n", __func__, image_name, len);
out:
	kfree(reply_buf);
	kfree(image_id);
	return image_name;
}

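/*
 * The exchange with "dir_get_name" above, for reference: the request
 * is the image id encoded as a string (le32 length plus bytes), and a
 * successful reply is the image name encoded the same way.  A failed
 * lookup is tolerated -- callers treat a NULL image name as "name
 * unknown" (see rbd_spec_fill_names() below).
 */
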
static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	const char *snap_name;
	u32 which = 0;

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which < snapc->num_snaps) {
		if (!strcmp(name, snap_name))
			return snapc->snaps[which];
		snap_name += strlen(snap_name) + 1;
		which++;
	}
	return CEPH_NOSNAP;
}

static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 snap_id;
	u32 which;
	bool found = false;

	/* Search in the snapshot context */

	for (which = 0; !found && which < snapc->num_snaps; which++) {
		const char *snap_name;

		snap_id = snapc->snaps[which];
		snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
		if (IS_ERR(snap_name)) {
			/* ignore no-longer existing snapshots */
			if (PTR_ERR(snap_name) == -ENOENT)
				continue;
			return CEPH_NOSNAP;
		}
		found = !strcmp(name, snap_name);
		kfree(snap_name);
	}
	return found ? snap_id : CEPH_NOSNAP;
}

/*
 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
 * no snapshot by that name is found, or if an error occurs.
 */
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	if (rbd_dev->image_format == 1)
		return rbd_v1_snap_id_by_name(rbd_dev, name);

	return rbd_v2_snap_id_by_name(rbd_dev, name);
}

/*
 * An image being mapped will have everything but the snap id.
 */
static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;

	rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
	rbd_assert(spec->image_id && spec->image_name);
	rbd_assert(spec->snap_name);

	if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
		u64 snap_id;

		snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
		if (snap_id == CEPH_NOSNAP)
			return -ENOENT;
		spec->snap_id = snap_id;
	} else {
		spec->snap_id = CEPH_NOSNAP;
	}

	return 0;
}

/*
 * A parent image will have all ids but none of the names.
 *
 * All names in an rbd spec are dynamically allocated.  It's OK if we
 * can't figure out the name for an image id.
 */
static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_spec *spec = rbd_dev->spec;
	const char *pool_name;
	const char *image_name;
	const char *snap_name;
	int ret;

	rbd_assert(spec->pool_id != CEPH_NOPOOL);
	rbd_assert(spec->image_id);
	rbd_assert(spec->snap_id != CEPH_NOSNAP);

	/* Get the pool name; we have to make our own copy of this */

	pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
	if (!pool_name) {
		rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
		return -EIO;
	}
	pool_name = kstrdup(pool_name, GFP_KERNEL);
	if (!pool_name)
		return -ENOMEM;

	/* Fetch the image name; tolerate failure here */

	image_name = rbd_dev_image_name(rbd_dev);
	if (!image_name)
		rbd_warn(rbd_dev, "unable to get image name");

	/* Fetch the snapshot name */

	snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
	if (IS_ERR(snap_name)) {
		ret = PTR_ERR(snap_name);
		goto out_err;
	}

	spec->pool_name = pool_name;
	spec->image_name = image_name;
	spec->snap_name = snap_name;

	return 0;

out_err:
	kfree(image_name);
	kfree(pool_name);
	return ret;
}

static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
{
	size_t size;
	int ret;
	void *reply_buf;
	void *p;
	void *end;
	u64 seq;
	u32 snap_count;
	struct ceph_snap_context *snapc;
	u32 i;

	/*
	 * We'll need room for the seq value (maximum snapshot id),
	 * snapshot count, and array of that many snapshot ids.
	 * For now we have a fixed upper limit on the number we're
	 * prepared to receive.
	 */
	size = sizeof (__le64) + sizeof (__le32) +
			RBD_MAX_SNAP_COUNT * sizeof (__le64);
	reply_buf = kzalloc(size, GFP_KERNEL);
	if (!reply_buf)
		return -ENOMEM;

	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
				  &rbd_dev->header_oloc, "get_snapcontext",
				  NULL, 0, reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out;

	p = reply_buf;
	end = reply_buf + ret;
	ret = -ERANGE;
	ceph_decode_64_safe(&p, end, seq, out);
	ceph_decode_32_safe(&p, end, snap_count, out);

	/*
	 * Make sure the reported number of snapshot ids wouldn't go
	 * beyond the end of our buffer.  But before checking that,
	 * make sure the computed size of the snapshot context we
	 * allocate is representable in a size_t.
	 */
	if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
				 / sizeof (u64))
		goto out;
	if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
		goto out;
	ret = 0;

	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc) {
		ret = -ENOMEM;
		goto out;
	}
	snapc->seq = seq;
	for (i = 0; i < snap_count; i++)
		snapc->snaps[i] = ceph_decode_64(&p);

	ceph_put_snap_context(rbd_dev->header.snapc);
	rbd_dev->header.snapc = snapc;

	dout(" snap context seq = %llu, snap_count = %u\n",
		(unsigned long long)seq, (unsigned int)snap_count);
out:
	kfree(reply_buf);
	return ret;
}

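/*
 * Layout of the "get_snapcontext" reply decoded above (inferred from
 * the decode calls, so treat the details as an assumption):
 *
 *	le64 seq		highest snapshot id
 *	le32 snap_count
 *	le64 snaps[snap_count]	snapshot ids
 *
 * RBD_MAX_SNAP_COUNT sizes the reply buffer; a larger snapshot context
 * fails the ceph_has_room() check above with -ERANGE.
 */
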
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	size_t size;
	void *reply_buf;
	__le64 snapid;
	int ret;
	void *p;
	void *end;
	char *snap_name;

	size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf)
		return ERR_PTR(-ENOMEM);

	snapid = cpu_to_le64(snap_id);
	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
				  &rbd_dev->header_oloc, "get_snapshot_name",
				  &snapid, sizeof(snapid), reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0) {
		snap_name = ERR_PTR(ret);
		goto out;
	}

	p = reply_buf;
	end = reply_buf + ret;
	snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
	if (IS_ERR(snap_name))
		goto out;

	dout(" snap_id 0x%016llx snap_name = %s\n",
		(unsigned long long)snap_id, snap_name);
out:
	kfree(reply_buf);
	return snap_name;
}

static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
{
	bool first_time = rbd_dev->header.object_prefix == NULL;
	int ret;

	ret = rbd_dev_v2_image_size(rbd_dev);
	if (ret)
		return ret;

	if (first_time) {
		ret = rbd_dev_v2_header_onetime(rbd_dev);
		if (ret)
			return ret;
	}

	ret = rbd_dev_v2_snap_context(rbd_dev);
	if (ret && first_time) {
		kfree(rbd_dev->header.object_prefix);
		rbd_dev->header.object_prefix = NULL;
	}

	return ret;
}

static int rbd_dev_header_info(struct rbd_device *rbd_dev)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_header_info(rbd_dev);

	return rbd_dev_v2_header_info(rbd_dev);
}

/*
 * Skips over white space at *buf, and updates *buf to point to the
 * first found non-space character (if any).  Returns the length of
 * the token (string of non-white space characters) found.  Note
 * that *buf must be terminated with '\0'.
 */
static inline size_t next_token(const char **buf)
{
	/*
	 * These are the characters that produce nonzero for
	 * isspace() in the "C" and "POSIX" locales.
	 */
	const char *spaces = " \f\n\r\t\v";

	*buf += strspn(*buf, spaces);	/* Find start of token */

	return strcspn(*buf, spaces);	/* Return token length */
}

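/*
 * For example (a made-up buffer, shown only to illustrate the
 * contract): with *buf pointing at "  rbd foo", next_token() advances
 * *buf to "rbd foo" and returns 3.  The caller is expected to consume
 * those 3 bytes itself (as dup_token() below does) before asking for
 * the next token.
 */
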
/*
 * Finds the next token in *buf, dynamically allocates a buffer big
 * enough to hold a copy of it, and copies the token into the new
 * buffer.  The copy is guaranteed to be terminated with '\0'.  Note
 * that a duplicate buffer is created even for a zero-length token.
 *
 * Returns a pointer to the newly-allocated duplicate, or a null
 * pointer if memory for the duplicate was not available.  If
 * the lenp argument is a non-null pointer, the length of the token
 * (not including the '\0') is returned in *lenp.
 *
 * If successful, the *buf pointer will be updated to point beyond
 * the end of the found token.
 *
 * Note: uses GFP_KERNEL for allocation.
 */
static inline char *dup_token(const char **buf, size_t *lenp)
{
	char *dup;
	size_t len;

	len = next_token(buf);
	dup = kmemdup(*buf, len + 1, GFP_KERNEL);
	if (!dup)
		return NULL;
	*(dup + len) = '\0';
	*buf += len;

	if (lenp)
		*lenp = len;

	return dup;
}

/*
 * Parse the options provided for an "rbd add" (i.e., rbd image
 * mapping) request.  These arrive via a write to /sys/bus/rbd/add,
 * and the data written is passed here via a NUL-terminated buffer.
 * Returns 0 if successful or an error code otherwise.
 *
 * The information extracted from these options is recorded in
 * the other parameters, which return dynamically-allocated
 * structures:
 *  ceph_opts
 *	The address of a pointer that will refer to a ceph options
 *	structure.  Caller must release the returned pointer using
 *	ceph_destroy_options() when it is no longer needed.
 *  opts
 *	Address of an rbd options pointer.  Fully initialized by
 *	this function; caller must release with kfree().
 *  rbd_spec
 *	Address of an rbd image specification pointer.  Fully
 *	initialized by this function based on parsed options.
 *	Caller must release with rbd_spec_put().
 *
 * The options passed take this form:
 *  <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
 * where:
 *  <mon_addrs>
 *	A comma-separated list of one or more monitor addresses.
 *	A monitor address is an ip address, optionally followed
 *	by a port number (separated by a colon).
 *	  I.e.:  ip1[:port1][,ip2[:port2]...]
 *  <options>
 *	A comma-separated list of ceph and/or rbd options.
 *  <pool_name>
 *	The name of the rados pool containing the rbd image.
 *  <image_name>
 *	The name of the image in that pool to map.
 *  <snap_name>
 *	An optional snapshot name.  If provided, the mapping will
 *	present data from the image at the time that snapshot was
 *	created.  The image head is used if no snapshot name is
 *	provided.  Snapshot mappings are always read-only.
 */
static int rbd_add_parse_args(const char *buf,
			      struct ceph_options **ceph_opts,
			      struct rbd_options **opts,
			      struct rbd_spec **rbd_spec)
{
	size_t len;
	char *options;
	const char *mon_addrs;
	char *snap_name;
	size_t mon_addrs_size;
	struct parse_rbd_opts_ctx pctx = { 0 };
	struct ceph_options *copts;
	int ret;

	/* The first four tokens are required */

	len = next_token(&buf);
	if (!len) {
		rbd_warn(NULL, "no monitor address(es) provided");
		return -EINVAL;
	}
	mon_addrs = buf;
	mon_addrs_size = len + 1;
	buf += len;

	ret = -EINVAL;
	options = dup_token(&buf, NULL);
	if (!options)
		return -ENOMEM;
	if (!*options) {
		rbd_warn(NULL, "no options provided");
		goto out_err;
	}

	pctx.spec = rbd_spec_alloc();
	if (!pctx.spec)
		goto out_mem;

	pctx.spec->pool_name = dup_token(&buf, NULL);
	if (!pctx.spec->pool_name)
		goto out_mem;
	if (!*pctx.spec->pool_name) {
		rbd_warn(NULL, "no pool name provided");
		goto out_err;
	}

	pctx.spec->image_name = dup_token(&buf, NULL);
	if (!pctx.spec->image_name)
		goto out_mem;
	if (!*pctx.spec->image_name) {
		rbd_warn(NULL, "no image name provided");
		goto out_err;
	}

	/*
	 * Snapshot name is optional; default is to use "-"
	 * (indicating the head/no snapshot).
	 */
	len = next_token(&buf);
	if (!len) {
		buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
		len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
	} else if (len > RBD_MAX_SNAP_NAME_LEN) {
		ret = -ENAMETOOLONG;
		goto out_err;
	}
	snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
	if (!snap_name)
		goto out_mem;
	*(snap_name + len) = '\0';
	pctx.spec->snap_name = snap_name;

	/* Initialize all rbd options to the defaults */

	pctx.opts = kzalloc(sizeof(*pctx.opts), GFP_KERNEL);
	if (!pctx.opts)
		goto out_mem;

	pctx.opts->read_only = RBD_READ_ONLY_DEFAULT;
	pctx.opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
	pctx.opts->lock_timeout = RBD_LOCK_TIMEOUT_DEFAULT;
	pctx.opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT;
	pctx.opts->exclusive = RBD_EXCLUSIVE_DEFAULT;
	pctx.opts->trim = RBD_TRIM_DEFAULT;

	copts = ceph_parse_options(options, mon_addrs,
				   mon_addrs + mon_addrs_size - 1,
				   parse_rbd_opts_token, &pctx);
	if (IS_ERR(copts)) {
		ret = PTR_ERR(copts);
		goto out_err;
	}
	kfree(options);

	*ceph_opts = copts;
	*opts = pctx.opts;
	*rbd_spec = pctx.spec;
	return 0;

out_mem:
	ret = -ENOMEM;
out_err:
	kfree(pctx.opts);
	rbd_spec_put(pctx.spec);
	kfree(options);
	return ret;
}

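/*
 * Example of a complete add command (all values made up), written to
 * the sysfs control file described above:
 *
 *	$ echo "1.2.3.4:6789 name=admin,secret=<key> rbd foo snap1" \
 *		> /sys/bus/rbd/add
 *
 * which maps snapshot "snap1" of image "foo" in pool "rbd", using the
 * monitor at 1.2.3.4:6789.  Omitting the trailing "snap1" maps the
 * image head instead.
 */
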
static void rbd_dev_image_unlock(struct rbd_device *rbd_dev)
{
	down_write(&rbd_dev->lock_rwsem);
	if (__rbd_is_lock_owner(rbd_dev))
		rbd_unlock(rbd_dev);
	up_write(&rbd_dev->lock_rwsem);
}

static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
{
	int ret;

	if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK)) {
		rbd_warn(rbd_dev, "exclusive-lock feature is not enabled");
		return -EINVAL;
	}

	/* FIXME: "rbd map --exclusive" should be interruptible */
	down_read(&rbd_dev->lock_rwsem);
	ret = rbd_wait_state_locked(rbd_dev, true);
	up_read(&rbd_dev->lock_rwsem);
	if (ret) {
		rbd_warn(rbd_dev, "failed to acquire exclusive lock");
		return ret;
	}

	return 0;
}

/*
 * An rbd format 2 image has a unique identifier, distinct from the
 * name given to it by the user.  Internally, that identifier is
 * what's used to specify the names of objects related to the image.
 *
 * A special "rbd id" object is used to map an rbd image name to its
 * id.  If that object doesn't exist, then there is no v2 rbd image
 * with the supplied name.
 *
 * This function will record the given rbd_dev's image_id field if
 * it can be determined, and in that case will return 0.  If any
 * errors occur a negative errno will be returned and the rbd_dev's
 * image_id field will be unchanged (and should be NULL).
 */
static int rbd_dev_image_id(struct rbd_device *rbd_dev)
{
	int ret;
	size_t size;
	CEPH_DEFINE_OID_ONSTACK(oid);
	void *response;
	char *image_id;

	/*
	 * When probing a parent image, the image id is already
	 * known (and the image name likely is not).  There's no
	 * need to fetch the image id again in this case.  We
	 * do still need to set the image format though.
	 */
	if (rbd_dev->spec->image_id) {
		rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
		return 0;
	}

	/*
	 * First, see if the format 2 image id file exists, and if
	 * so, get the image's persistent id from it.
	 */
	ret = ceph_oid_aprintf(&oid, GFP_KERNEL, "%s%s", RBD_ID_PREFIX,
			       rbd_dev->spec->image_name);
	if (ret)
		return ret;

	dout("rbd id object name is %s\n", oid.name);

	/* Response will be an encoded string, which includes a length */
	size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
	response = kzalloc(size, GFP_NOIO);
	if (!response) {
		ret = -ENOMEM;
		goto out;
	}

	/* If it doesn't exist we'll assume it's a format 1 image */
	ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
				  "get_id", NULL, 0,
				  response, RBD_IMAGE_ID_LEN_MAX);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret == -ENOENT) {
		image_id = kstrdup("", GFP_KERNEL);
		ret = image_id ? 0 : -ENOMEM;
		if (!ret)
			rbd_dev->image_format = 1;
	} else if (ret >= 0) {
		void *p = response;

		image_id = ceph_extract_encoded_string(&p, p + ret,
						NULL, GFP_NOIO);
		ret = PTR_ERR_OR_ZERO(image_id);
		if (!ret)
			rbd_dev->image_format = 2;
	}

	if (!ret) {
		rbd_dev->spec->image_id = image_id;
		dout("image_id is %s\n", image_id);
	}
out:
	kfree(response);
	ceph_oid_destroy(&oid);
	return ret;
}

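/*
 * For example (assuming the usual RBD_ID_PREFIX of "rbd_id." from
 * rbd_types.h): probing image "foo" reads object "rbd_id.foo", whose
 * "get_id" method returns the image id as a length-prefixed string.
 * A format 1 image has no such object, hence the -ENOENT fallback
 * above, which records an empty image id instead.
 */
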
/*
 * Undo whatever state changes are made by v1 or v2 header info
 * routines.
 */
static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
	struct rbd_image_header *header;

	rbd_dev_parent_put(rbd_dev);

	/* Free dynamic fields from the header, then zero it out */

	header = &rbd_dev->header;
	ceph_put_snap_context(header->snapc);
	kfree(header->snap_sizes);
	kfree(header->snap_names);
	kfree(header->object_prefix);
	memset(header, 0, sizeof (*header));
}

static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
{
	int ret;

	ret = rbd_dev_v2_object_prefix(rbd_dev);
	if (ret)
		return ret;

	/*
	 * Get and check the features for the image.  Currently the
	 * features are assumed to never change.
	 */
	ret = rbd_dev_v2_features(rbd_dev);
	if (ret)
		goto out_err;

	/* If the image supports fancy striping, get its parameters */

	if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
		ret = rbd_dev_v2_striping_info(rbd_dev);
		if (ret < 0)
			goto out_err;
	}

	if (rbd_dev->header.features & RBD_FEATURE_DATA_POOL) {
		ret = rbd_dev_v2_data_pool(rbd_dev);
		if (ret)
			goto out_err;
	}

	rbd_init_layout(rbd_dev);
	return 0;

out_err:
	rbd_dev->header.features = 0;
	kfree(rbd_dev->header.object_prefix);
	rbd_dev->header.object_prefix = NULL;
	return ret;
}

/*
 * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() ->
 * rbd_dev_image_probe() recursion depth, which is also the length
 * of the already discovered part of the parent chain.
 */
static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
{
	struct rbd_device *parent = NULL;
	int ret;

	if (!rbd_dev->parent_spec)
		return 0;

	if (++depth > RBD_MAX_PARENT_CHAIN_LEN) {
		pr_info("parent chain is too long (%d)\n", depth);
		ret = -EINVAL;
		goto out_err;
	}

	parent = __rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec);
	if (!parent) {
		ret = -ENOMEM;
		goto out_err;
	}

	/*
	 * Images related by parent/child relationships always share
	 * rbd_client and spec/parent_spec, so bump their refcounts.
	 */
	__rbd_get_client(rbd_dev->rbd_client);
	rbd_spec_get(rbd_dev->parent_spec);

	ret = rbd_dev_image_probe(parent, depth);
	if (ret < 0)
		goto out_err;

	rbd_dev->parent = parent;
	atomic_set(&rbd_dev->parent_ref, 1);
	return 0;

out_err:
	rbd_dev_unparent(rbd_dev);
	rbd_dev_destroy(parent);
	return ret;
}

static void rbd_dev_device_release(struct rbd_device *rbd_dev)
{
	clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	rbd_dev_mapping_clear(rbd_dev);
	rbd_free_disk(rbd_dev);
	if (!single_major)
		unregister_blkdev(rbd_dev->major, rbd_dev->name);
}

/*
 * rbd_dev->header_rwsem must be locked for write and will be unlocked
 * upon return.
 */
static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{
	int ret;

	/* Record our major and minor device numbers. */

	if (!single_major) {
		ret = register_blkdev(0, rbd_dev->name);
		if (ret < 0)
			goto err_out_unlock;

		rbd_dev->major = ret;
		rbd_dev->minor = 0;
	} else {
		rbd_dev->major = rbd_major;
		rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
	}

	/* Set up the blkdev mapping. */

	ret = rbd_init_disk(rbd_dev);
	if (ret)
		goto err_out_blkdev;

	ret = rbd_dev_mapping_set(rbd_dev);
	if (ret)
		goto err_out_disk;

	set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
	set_disk_ro(rbd_dev->disk, rbd_dev->opts->read_only);

	ret = dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id);
	if (ret)
		goto err_out_mapping;

	set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	up_write(&rbd_dev->header_rwsem);
	return 0;

err_out_mapping:
	rbd_dev_mapping_clear(rbd_dev);
err_out_disk:
	rbd_free_disk(rbd_dev);
err_out_blkdev:
	if (!single_major)
		unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_unlock:
	up_write(&rbd_dev->header_rwsem);
	return ret;
}

static int rbd_dev_header_name(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;
	int ret;

	/* Record the header object name for this rbd image. */

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
				       spec->image_name, RBD_SUFFIX);
	else
		ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
				       RBD_HEADER_PREFIX, spec->image_id);

	return ret;
}

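/*
 * For example (assuming the usual values of RBD_SUFFIX and
 * RBD_HEADER_PREFIX from rbd_types.h): a format 1 image "foo" gets
 * header object "foo.rbd", while a format 2 image with id "abc123"
 * gets header object "rbd_header.abc123".
 */
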
static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
	rbd_dev_unprobe(rbd_dev);
	if (rbd_dev->opts)
		rbd_unregister_watch(rbd_dev);

	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;
}

/*
 * Probe for the existence of the header object for the given rbd
 * device.  If this image is the one being mapped (i.e., not a
 * parent), initiate a watch on its header object before using that
 * object to get detailed information about the rbd image.
 */
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
{
	int ret;

	/*
	 * Get the id from the image id object.  Unless there's an
	 * error, rbd_dev->spec->image_id will be filled in with
	 * a dynamically-allocated string, and rbd_dev->image_format
	 * will be set to either 1 or 2.
	 */
	ret = rbd_dev_image_id(rbd_dev);
	if (ret)
		return ret;

	ret = rbd_dev_header_name(rbd_dev);
	if (ret)
		goto err_out_format;

	if (!depth) {
		ret = rbd_register_watch(rbd_dev);
		if (ret) {
			if (ret == -ENOENT)
				pr_info("image %s/%s%s%s does not exist\n",
					rbd_dev->spec->pool_name,
					rbd_dev->spec->pool_ns ?: "",
					rbd_dev->spec->pool_ns ? "/" : "",
					rbd_dev->spec->image_name);
			goto err_out_format;
		}
	}

	ret = rbd_dev_header_info(rbd_dev);
	if (ret)
		goto err_out_watch;

	/*
	 * If this image is the one being mapped, we have pool name and
	 * id, image name and id, and snap name - need to fill snap id.
	 * Otherwise this is a parent image, identified by pool, image
	 * and snap ids - need to fill in names for those ids.
	 */
	if (!depth)
		ret = rbd_spec_fill_snap_id(rbd_dev);
	else
		ret = rbd_spec_fill_names(rbd_dev);
	if (ret) {
		if (ret == -ENOENT)
			pr_info("snap %s/%s%s%s@%s does not exist\n",
				rbd_dev->spec->pool_name,
				rbd_dev->spec->pool_ns ?: "",
				rbd_dev->spec->pool_ns ? "/" : "",
				rbd_dev->spec->image_name,
				rbd_dev->spec->snap_name);
		goto err_out_probe;
	}

	if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
		ret = rbd_dev_v2_parent_info(rbd_dev);
		if (ret)
			goto err_out_probe;

		/*
		 * Need to warn users if this image is the one being
		 * mapped and has a parent.
		 */
		if (!depth && rbd_dev->parent_spec)
			rbd_warn(rbd_dev,
				 "WARNING: kernel layering is EXPERIMENTAL!");
	}

	ret = rbd_dev_probe_parent(rbd_dev, depth);
	if (ret)
		goto err_out_probe;

	dout("discovered format %u image, header name is %s\n",
		rbd_dev->image_format, rbd_dev->header_oid.name);
	return 0;

err_out_probe:
	rbd_dev_unprobe(rbd_dev);
err_out_watch:
	if (!depth)
		rbd_unregister_watch(rbd_dev);
err_out_format:
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;
	return ret;
}

static ssize_t do_rbd_add(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct ceph_options *ceph_opts = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct rbd_spec *spec = NULL;
	struct rbd_client *rbdc;
	int rc;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	/* parse add command */
	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
	if (rc < 0)
		goto out;

	rbdc = rbd_get_client(ceph_opts);
	if (IS_ERR(rbdc)) {
		rc = PTR_ERR(rbdc);
		goto err_out_args;
	}

	/* pick the pool */
	rc = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, spec->pool_name);
	if (rc < 0) {
		if (rc == -ENOENT)
			pr_info("pool %s does not exist\n", spec->pool_name);
		goto err_out_client;
	}
	spec->pool_id = (u64)rc;

	rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
	if (!rbd_dev) {
		rc = -ENOMEM;
		goto err_out_client;
	}
	rbdc = NULL;		/* rbd_dev now owns this */
	spec = NULL;		/* rbd_dev now owns this */
	rbd_opts = NULL;	/* rbd_dev now owns this */

	rbd_dev->config_info = kstrdup(buf, GFP_KERNEL);
	if (!rbd_dev->config_info) {
		rc = -ENOMEM;
		goto err_out_rbd_dev;
	}

	down_write(&rbd_dev->header_rwsem);
	rc = rbd_dev_image_probe(rbd_dev, 0);
	if (rc < 0) {
		up_write(&rbd_dev->header_rwsem);
		goto err_out_rbd_dev;
	}

	/* If we are mapping a snapshot it must be marked read-only */
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
		rbd_dev->opts->read_only = true;

	rc = rbd_dev_device_setup(rbd_dev);
	if (rc)
		goto err_out_image_probe;

	if (rbd_dev->opts->exclusive) {
		rc = rbd_add_acquire_lock(rbd_dev);
		if (rc)
			goto err_out_device_setup;
	}

	/* Everything's ready.  Announce the disk to the world. */

	rc = device_add(&rbd_dev->dev);
	if (rc)
		goto err_out_image_lock;

	add_disk(rbd_dev->disk);
	/* see rbd_init_disk() */
	blk_put_queue(rbd_dev->disk->queue);

	spin_lock(&rbd_dev_list_lock);
	list_add_tail(&rbd_dev->node, &rbd_dev_list);
	spin_unlock(&rbd_dev_list_lock);

	pr_info("%s: capacity %llu features 0x%llx\n", rbd_dev->disk->disk_name,
		(unsigned long long)get_capacity(rbd_dev->disk) << SECTOR_SHIFT,
		rbd_dev->header.features);

	rc = count;
out:
	module_put(THIS_MODULE);
	return rc;

err_out_image_lock:
	rbd_dev_image_unlock(rbd_dev);
err_out_device_setup:
	rbd_dev_device_release(rbd_dev);
err_out_image_probe:
	rbd_dev_image_release(rbd_dev);
err_out_rbd_dev:
	rbd_dev_destroy(rbd_dev);
err_out_client:
	rbd_put_client(rbdc);
err_out_args:
	ceph_destroy_options(ceph_opts);
	kfree(rbd_opts);
	rbd_spec_put(spec);
	goto out;
}

static ssize_t rbd_add(struct bus_type *bus,
		       const char *buf,
		       size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_add(bus, buf, count);
}

static ssize_t rbd_add_single_major(struct bus_type *bus,
				    const char *buf,
				    size_t count)
{
	return do_rbd_add(bus, buf, count);
}

static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
{
	while (rbd_dev->parent) {
		struct rbd_device *first = rbd_dev;
		struct rbd_device *second = first->parent;
		struct rbd_device *third;

		/*
		 * Follow to the parent with no grandparent and
		 * remove it.
		 */
		while (second && (third = second->parent)) {
			first = second;
			second = third;
		}
		rbd_assert(second);
		rbd_dev_image_release(second);
		rbd_dev_destroy(second);
		first->parent = NULL;
		first->parent_overlap = 0;

		rbd_assert(first->parent_spec);
		rbd_spec_put(first->parent_spec);
		first->parent_spec = NULL;
	}
}

static ssize_t do_rbd_remove(struct bus_type *bus,
			     const char *buf,
			     size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct list_head *tmp;
	int dev_id;
	char opt_buf[6];
	bool already = false;
	bool force = false;
	int ret;

	dev_id = -1;
	opt_buf[0] = '\0';
	sscanf(buf, "%d %5s", &dev_id, opt_buf);
	if (dev_id < 0) {
		pr_err("dev_id out of range\n");
		return -EINVAL;
	}
	if (opt_buf[0] != '\0') {
		if (!strcmp(opt_buf, "force")) {
			force = true;
		} else {
			pr_err("bad remove option at '%s'\n", opt_buf);
			return -EINVAL;
		}
	}

	ret = -ENOENT;
	spin_lock(&rbd_dev_list_lock);
	list_for_each(tmp, &rbd_dev_list) {
		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id == dev_id) {
			ret = 0;
			break;
		}
	}
	if (!ret) {
		spin_lock_irq(&rbd_dev->lock);
		if (rbd_dev->open_count && !force)
			ret = -EBUSY;
		else
			already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
							&rbd_dev->flags);
		spin_unlock_irq(&rbd_dev->lock);
	}
	spin_unlock(&rbd_dev_list_lock);
	if (ret < 0 || already)
		return ret;

	/*
	 * Prevent new IO from being queued and wait for existing
	 * IO to complete/fail.
	 */
	blk_mq_freeze_queue(rbd_dev->disk->queue);
	blk_set_queue_dying(rbd_dev->disk->queue);

	del_gendisk(rbd_dev->disk);
	spin_lock(&rbd_dev_list_lock);
	list_del_init(&rbd_dev->node);
	spin_unlock(&rbd_dev_list_lock);
	device_del(&rbd_dev->dev);

	rbd_dev_image_unlock(rbd_dev);
	rbd_dev_device_release(rbd_dev);
	rbd_dev_image_release(rbd_dev);
	rbd_dev_destroy(rbd_dev);
	return count;
}

static ssize_t rbd_remove(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_remove(bus, buf, count);
}

static ssize_t rbd_remove_single_major(struct bus_type *bus,
				       const char *buf,
				       size_t count)
{
	return do_rbd_remove(bus, buf, count);
}

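/*
 * Example (the device id is made up): detach the image mapped at
 * /dev/rbd0, even if it is still open:
 *
 *	$ echo "0 force" > /sys/bus/rbd/remove
 *
 * Without "force" the write fails with -EBUSY while the device has
 * openers (see the open_count check above).
 */
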
/*
 * create control files in sysfs
 * /sys/bus/rbd/...
 */
static int __init rbd_sysfs_init(void)
{
	int ret;

	ret = device_register(&rbd_root_dev);
	if (ret < 0)
		return ret;

	ret = bus_register(&rbd_bus_type);
	if (ret < 0)
		device_unregister(&rbd_root_dev);

	return ret;
}

static void __exit rbd_sysfs_cleanup(void)
{
	bus_unregister(&rbd_bus_type);
	device_unregister(&rbd_root_dev);
}

static int __init rbd_slab_init(void)
{
	rbd_assert(!rbd_img_request_cache);
	rbd_img_request_cache = KMEM_CACHE(rbd_img_request, 0);
	if (!rbd_img_request_cache)
		return -ENOMEM;

	rbd_assert(!rbd_obj_request_cache);
	rbd_obj_request_cache = KMEM_CACHE(rbd_obj_request, 0);
	if (!rbd_obj_request_cache)
		goto out_err;

	return 0;

out_err:
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
	return -ENOMEM;
}

static void rbd_slab_exit(void)
{
	rbd_assert(rbd_obj_request_cache);
	kmem_cache_destroy(rbd_obj_request_cache);
	rbd_obj_request_cache = NULL;

	rbd_assert(rbd_img_request_cache);
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
}

static int __init rbd_init(void)
{
	int rc;

	if (!libceph_compatible(NULL)) {
		rbd_warn(NULL, "libceph incompatibility (quitting)");
		return -EINVAL;
	}

	rc = rbd_slab_init();
	if (rc)
		return rc;

	/*
	 * The number of active work items is limited by the number of
	 * rbd devices * queue depth, so leave @max_active at default.
	 */
	rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
	if (!rbd_wq) {
		rc = -ENOMEM;
		goto err_out_slab;
	}

	if (single_major) {
		rbd_major = register_blkdev(0, RBD_DRV_NAME);
		if (rbd_major < 0) {
			rc = rbd_major;
			goto err_out_wq;
		}
	}

	rc = rbd_sysfs_init();
	if (rc)
		goto err_out_blkdev;

	if (single_major)
		pr_info("loaded (major %d)\n", rbd_major);
	else
		pr_info("loaded\n");

	return 0;

err_out_blkdev:
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
err_out_wq:
	destroy_workqueue(rbd_wq);
err_out_slab:
	rbd_slab_exit();
	return rc;
}

static void __exit rbd_exit(void)
{
	ida_destroy(&rbd_dev_id_ida);
	rbd_sysfs_cleanup();
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
	destroy_workqueue(rbd_wq);
	rbd_slab_exit();
}

module_init(rbd_init);
module_exit(rbd_exit);

6186 MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
6187 MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
6188 MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
6189 /* following authorship retained from original osdblk.c */
6190 MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");
6192 MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
6193 MODULE_LICENSE("GPL");