2 * Copyright (C) 2003 Sistina Software Limited.
3 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
5 * This file is released under the GPL.
8 #include <linux/device-mapper.h>
11 #include "dm-path-selector.h"
12 #include "dm-uevent.h"
14 #include <linux/blkdev.h>
15 #include <linux/ctype.h>
16 #include <linux/init.h>
17 #include <linux/mempool.h>
18 #include <linux/module.h>
19 #include <linux/pagemap.h>
20 #include <linux/slab.h>
21 #include <linux/time.h>
22 #include <linux/workqueue.h>
23 #include <linux/delay.h>
24 #include <scsi/scsi_dh.h>
25 #include <linux/atomic.h>
27 #define DM_MSG_PREFIX "multipath"
28 #define DM_PG_INIT_DELAY_MSECS 2000
29 #define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)
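/*
 * DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1) is a sentinel meaning "not
 * configured": pg_init_delay_msecs keeps this value until the
 * pg_init_delay_msecs feature overrides it, and __pg_init_all_paths()
 * falls back to DM_PG_INIT_DELAY_MSECS while it still holds the sentinel.
 */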
33 struct list_head list;
35 struct priority_group *pg; /* Owning PG */
36 unsigned is_active; /* Path status */
37 unsigned fail_count; /* Cumulative failure count */
40 struct delayed_work activate_path;
43 #define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
46 * Paths are grouped into Priority Groups and numbered from 1 upwards.
47 * Each has a path selector which controls which path gets used.
49 struct priority_group {
50 struct list_head list;
52 struct multipath *m; /* Owning multipath instance */
53 struct path_selector ps;
55 unsigned pg_num; /* Reference number */
56 unsigned bypassed; /* Temporarily bypass this PG? */
58 unsigned nr_pgpaths; /* Number of paths in PG */
59 struct list_head pgpaths;
62 /* Multipath context */
64 struct list_head list;
67 const char *hw_handler_name;
68 char *hw_handler_params;
72 unsigned nr_priority_groups;
73 struct list_head priority_groups;
75 wait_queue_head_t pg_init_wait; /* Wait for pg_init completion */
77 unsigned pg_init_required; /* pg_init needs calling? */
78 unsigned pg_init_in_progress; /* Only one pg_init allowed at once */
79 unsigned pg_init_delay_retry; /* Delay pg_init retry? */
81 unsigned nr_valid_paths; /* Total number of usable paths */
82 struct pgpath *current_pgpath;
83 struct priority_group *current_pg;
84 struct priority_group *next_pg; /* Switch to this PG if set */
85 unsigned repeat_count; /* I/Os left before calling PS again */
87 unsigned queue_io:1; /* Must we queue all I/O? */
88 unsigned queue_if_no_path:1; /* Queue I/O if last path fails? */
89 unsigned saved_queue_if_no_path:1; /* Saved state during suspension */
90 unsigned retain_attached_hw_handler:1; /* If there's already a hw_handler present, don't change it. */
91 unsigned pg_init_disabled:1; /* pg_init is not currently allowed */
93 unsigned pg_init_retries; /* Number of times to retry pg_init */
94 unsigned pg_init_count; /* Number of times pg_init called */
95 unsigned pg_init_delay_msecs; /* Number of msecs before pg_init retry */
97 struct work_struct trigger_event;
100 * We must use a mempool of dm_mpath_io structs so that we
101 * can resubmit bios on error.
103 mempool_t *mpio_pool;
105 struct mutex work_mutex;
109 * Context information attached to each bio we process.
112 struct pgpath *pgpath;
116 typedef int (*action_fn) (struct pgpath *pgpath);
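/*
 * fail_path() and reinstate_path() below match this signature;
 * multipath_message() passes one of them to action_dev() when handling
 * the "fail_path" and "reinstate_path" messages.
 */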
118 static struct kmem_cache *_mpio_cache;
120 static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
121 static void trigger_event(struct work_struct *work);
122 static void activate_path(struct work_struct *work);
123 static int __pgpath_busy(struct pgpath *pgpath);
126 /*-----------------------------------------------
127 * Allocation routines
128 *-----------------------------------------------*/
130 static struct pgpath *alloc_pgpath(void)
132 struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);
135 pgpath->is_active = 1;
136 INIT_DELAYED_WORK(&pgpath->activate_path, activate_path);
142 static void free_pgpath(struct pgpath *pgpath)
147 static struct priority_group *alloc_priority_group(void)
149 struct priority_group *pg;
151 pg = kzalloc(sizeof(*pg), GFP_KERNEL);
154 INIT_LIST_HEAD(&pg->pgpaths);
159 static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
161 struct pgpath *pgpath, *tmp;
163 list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
164 list_del(&pgpath->list);
165 dm_put_device(ti, pgpath->path.dev);
170 static void free_priority_group(struct priority_group *pg,
171 struct dm_target *ti)
173 struct path_selector *ps = &pg->ps;
176 ps->type->destroy(ps);
177 dm_put_path_selector(ps->type);
180 free_pgpaths(&pg->pgpaths, ti);
184 static struct multipath *alloc_multipath(struct dm_target *ti, bool use_blk_mq)
188 m = kzalloc(sizeof(*m), GFP_KERNEL);
190 INIT_LIST_HEAD(&m->priority_groups);
191 spin_lock_init(&m->lock);
193 m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
194 INIT_WORK(&m->trigger_event, trigger_event);
195 init_waitqueue_head(&m->pg_init_wait);
196 mutex_init(&m->work_mutex);
200 unsigned min_ios = dm_get_reserved_rq_based_ios();
202 m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache);
216 static void free_multipath(struct multipath *m)
218 struct priority_group *pg, *tmp;
220 list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
222 free_priority_group(pg, m->ti);
225 kfree(m->hw_handler_name);
226 kfree(m->hw_handler_params);
227 mempool_destroy(m->mpio_pool);
231 static struct dm_mpath_io *get_mpio(union map_info *info)
236 static struct dm_mpath_io *set_mpio(struct multipath *m, union map_info *info)
238 struct dm_mpath_io *mpio;
241 /* Use blk-mq pdu memory requested via per_io_data_size */
242 mpio = get_mpio(info);
243 memset(mpio, 0, sizeof(*mpio));
247 mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC);
251 memset(mpio, 0, sizeof(*mpio));
257 static void clear_request_fn_mpio(struct multipath *m, union map_info *info)
259 /* Only needed for non blk-mq (.request_fn) multipath */
261 struct dm_mpath_io *mpio = info->ptr;
264 mempool_free(mpio, m->mpio_pool);
268 /*-----------------------------------------------
270 *-----------------------------------------------*/
272 static int __pg_init_all_paths(struct multipath *m)
274 struct pgpath *pgpath;
275 unsigned long pg_init_delay = 0;
277 if (m->pg_init_in_progress || m->pg_init_disabled)
281 m->pg_init_required = 0;
283 /* Check here to reset pg_init_required */
287 if (m->pg_init_delay_retry)
288 pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
289 m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
290 list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
291 /* Skip failed paths */
292 if (!pgpath->is_active)
294 if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
296 m->pg_init_in_progress++;
298 return m->pg_init_in_progress;
301 static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
303 m->current_pg = pgpath->pg;
305 /* Must we initialise the PG first, and queue I/O till it's ready? */
306 if (m->hw_handler_name) {
307 m->pg_init_required = 1;
310 m->pg_init_required = 0;
314 m->pg_init_count = 0;
317 static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg,
320 struct dm_path *path;
322 path = pg->ps.type->select_path(&pg->ps, &m->repeat_count, nr_bytes);
326 m->current_pgpath = path_to_pgpath(path);
328 if (m->current_pg != pg)
329 __switch_pg(m, m->current_pgpath);
334 static void __choose_pgpath(struct multipath *m, size_t nr_bytes)
336 struct priority_group *pg;
337 unsigned bypassed = 1;
339 if (!m->nr_valid_paths) {
344 /* Were we instructed to switch PG? */
348 if (!__choose_path_in_pg(m, pg, nr_bytes))
352 /* Don't change PG until it has no remaining paths */
353 if (m->current_pg && !__choose_path_in_pg(m, m->current_pg, nr_bytes))
357 * Loop through priority groups until we find a valid path.
358 * First time we skip PGs marked 'bypassed'.
359 * Second time we only try the ones we skipped, but set
360 * pg_init_delay_retry so we do not hammer controllers.
363 list_for_each_entry(pg, &m->priority_groups, list) {
364 if (pg->bypassed == bypassed)
366 if (!__choose_path_in_pg(m, pg, nr_bytes)) {
368 m->pg_init_delay_retry = 1;
372 } while (bypassed--);
375 m->current_pgpath = NULL;
376 m->current_pg = NULL;
380 * Check whether bios must be queued in the device-mapper core rather
381 * than here in the target.
383 * m->lock must be held on entry.
385 * If m->queue_if_no_path and m->saved_queue_if_no_path hold the
386 * same value then we are not between multipath_presuspend()
387 * and multipath_resume() calls and we have no need to check
388 * for the DMF_NOFLUSH_SUSPENDING flag.
390 static int __must_push_back(struct multipath *m)
392 return (m->queue_if_no_path ||
393 (m->queue_if_no_path != m->saved_queue_if_no_path &&
394 dm_noflush_suspending(m->ti)));
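/*
 * Put differently: push back to the dm core while queue_if_no_path is
 * set, or when queue_if_no_path was just cleared by multipath_presuspend()
 * and a no-flush suspend is in progress, so outstanding I/O is requeued
 * rather than errored during the suspend.
 */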
398 * Map cloned requests
400 static int __multipath_map(struct dm_target *ti, struct request *clone,
401 union map_info *map_context,
402 struct request *rq, struct request **__clone)
404 struct multipath *m = (struct multipath *) ti->private;
405 int r = DM_MAPIO_REQUEUE;
406 size_t nr_bytes = clone ? blk_rq_bytes(clone) : blk_rq_bytes(rq);
407 struct pgpath *pgpath;
408 struct block_device *bdev;
409 struct dm_mpath_io *mpio;
411 spin_lock_irq(&m->lock);
413 /* Do we need to select a new pgpath? */
414 if (!m->current_pgpath ||
415 (!m->queue_io && (m->repeat_count && --m->repeat_count == 0)))
416 __choose_pgpath(m, nr_bytes);
418 pgpath = m->current_pgpath;
421 if (!__must_push_back(m))
422 r = -EIO; /* Failed */
424 } else if (m->queue_io || m->pg_init_required) {
425 __pg_init_all_paths(m);
429 mpio = set_mpio(m, map_context);
431 /* ENOMEM, requeue */
434 mpio->pgpath = pgpath;
435 mpio->nr_bytes = nr_bytes;
437 bdev = pgpath->path.dev->bdev;
439 spin_unlock_irq(&m->lock);
443 * Old request-based interface: allocated clone is passed in.
444 * Used by: .request_fn stacked on .request_fn path(s).
446 clone->q = bdev_get_queue(bdev);
447 clone->rq_disk = bdev->bd_disk;
448 clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
451 * blk-mq request-based interface; used by both:
452 * .request_fn stacked on blk-mq path(s) and
453 * blk-mq stacked on blk-mq path(s).
455 *__clone = blk_get_request(bdev_get_queue(bdev),
456 rq_data_dir(rq), GFP_ATOMIC);
457 if (IS_ERR(*__clone)) {
458 /* ENOMEM, requeue */
459 clear_request_fn_mpio(m, map_context);
462 (*__clone)->bio = (*__clone)->biotail = NULL;
463 (*__clone)->rq_disk = bdev->bd_disk;
464 (*__clone)->cmd_flags |= REQ_FAILFAST_TRANSPORT;
467 if (pgpath->pg->ps.type->start_io)
468 pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
471 return DM_MAPIO_REMAPPED;
474 spin_unlock_irq(&m->lock);
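/*
 * Both request-based entry points below funnel into __multipath_map():
 * multipath_map() is handed a ready-made clone (.request_fn stacked on
 * .request_fn paths), while multipath_clone_and_map() receives the
 * original request and returns a newly allocated clone for the blk-mq
 * based stacking variants.
 */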
479 static int multipath_map(struct dm_target *ti, struct request *clone,
480 union map_info *map_context)
482 return __multipath_map(ti, clone, map_context, NULL, NULL);
485 static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
486 union map_info *map_context,
487 struct request **clone)
489 return __multipath_map(ti, NULL, map_context, rq, clone);
492 static void multipath_release_clone(struct request *clone)
494 blk_put_request(clone);
498 * If we run out of usable paths, should we queue I/O or error it?
500 static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path,
501 unsigned save_old_value)
505 spin_lock_irqsave(&m->lock, flags);
508 m->saved_queue_if_no_path = m->queue_if_no_path;
510 m->saved_queue_if_no_path = queue_if_no_path;
511 m->queue_if_no_path = queue_if_no_path;
512 spin_unlock_irqrestore(&m->lock, flags);
514 if (!queue_if_no_path)
515 dm_table_run_md_queue_async(m->ti->table);
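/*
 * When queueing is switched off, kick the md request queue so that any
 * I/O held back while no path was usable is dispatched (or completed
 * with an error) promptly instead of waiting for new activity.
 */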
521 * An event is triggered whenever a path is taken out of use.
522 * Includes path failure and PG bypass.
524 static void trigger_event(struct work_struct *work)
526 struct multipath *m =
527 container_of(work, struct multipath, trigger_event);
529 dm_table_event(m->ti->table);
532 /*-----------------------------------------------------------------
533 * Constructor/argument parsing:
534 * <#multipath feature args> [<arg>]*
535 * <#hw_handler args> [hw_handler [<arg>]*]
536 * <#priority groups>
537 * <initial priority group>
538 * [<selector> <#selector args> [<arg>]*
539 * <#paths> <#per-path selector args>
540 * [<path> [<arg>]* ]+ ]+
541 *---------------------------------------------------------------*/
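/*
 * Purely illustrative argument string in the format above (device numbers
 * and the repeat count are placeholders, not taken from this file):
 *
 *   1 queue_if_no_path 0 2 1 round-robin 0 1 1 8:16 1000
 *                            round-robin 0 1 1 8:32 1000
 *
 * i.e. one feature arg, no hardware handler args, two priority groups
 * starting with group 1, each using round-robin with no selector args and
 * a single path that takes one per-path selector arg (the repeat count).
 */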
542 static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
543 struct dm_target *ti)
546 struct path_selector_type *pst;
549 static struct dm_arg _args[] = {
550 {0, 1024, "invalid number of path selector args"},
553 pst = dm_get_path_selector(dm_shift_arg(as));
555 ti->error = "unknown path selector type";
559 r = dm_read_arg_group(_args, as, &ps_argc, &ti->error);
561 dm_put_path_selector(pst);
565 r = pst->create(&pg->ps, ps_argc, as->argv);
567 dm_put_path_selector(pst);
568 ti->error = "path selector constructor failed";
573 dm_consume_args(as, ps_argc);
578 static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
579 struct dm_target *ti)
583 struct multipath *m = ti->private;
584 struct request_queue *q = NULL;
585 const char *attached_handler_name;
587 /* we need at least a path arg */
589 ti->error = "no device given";
590 return ERR_PTR(-EINVAL);
595 return ERR_PTR(-ENOMEM);
597 r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
600 ti->error = "error getting device";
604 if (m->retain_attached_hw_handler || m->hw_handler_name)
605 q = bdev_get_queue(p->path.dev->bdev);
607 if (m->retain_attached_hw_handler) {
609 attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
610 if (attached_handler_name) {
612 * Reset hw_handler_name to match the attached handler
613 * and clear any hw_handler_params associated with the ignored handler.
616 * NB. This modifies the table line to show the actual
617 * handler instead of the original table passed in.
619 kfree(m->hw_handler_name);
620 m->hw_handler_name = attached_handler_name;
622 kfree(m->hw_handler_params);
623 m->hw_handler_params = NULL;
627 if (m->hw_handler_name) {
628 r = scsi_dh_attach(q, m->hw_handler_name);
630 char b[BDEVNAME_SIZE];
632 printk(KERN_INFO "dm-mpath: retaining handler on device %s\n",
633 bdevname(p->path.dev->bdev, b));
637 ti->error = "error attaching hardware handler";
638 dm_put_device(ti, p->path.dev);
642 if (m->hw_handler_params) {
643 r = scsi_dh_set_params(q, m->hw_handler_params);
645 ti->error = "unable to set hardware handler parameters";
647 dm_put_device(ti, p->path.dev);
653 r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
655 dm_put_device(ti, p->path.dev);
666 static struct priority_group *parse_priority_group(struct dm_arg_set *as,
669 static struct dm_arg _args[] = {
670 {1, 1024, "invalid number of paths"},
671 {0, 1024, "invalid number of selector args"}
675 unsigned i, nr_selector_args, nr_args;
676 struct priority_group *pg;
677 struct dm_target *ti = m->ti;
681 ti->error = "not enough priority group arguments";
682 return ERR_PTR(-EINVAL);
685 pg = alloc_priority_group();
687 ti->error = "couldn't allocate priority group";
688 return ERR_PTR(-ENOMEM);
692 r = parse_path_selector(as, pg, ti);
699 r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error);
703 r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error);
707 nr_args = 1 + nr_selector_args;
708 for (i = 0; i < pg->nr_pgpaths; i++) {
709 struct pgpath *pgpath;
710 struct dm_arg_set path_args;
712 if (as->argc < nr_args) {
713 ti->error = "not enough path parameters";
718 path_args.argc = nr_args;
719 path_args.argv = as->argv;
721 pgpath = parse_path(&path_args, &pg->ps, ti);
722 if (IS_ERR(pgpath)) {
728 list_add_tail(&pgpath->list, &pg->pgpaths);
729 dm_consume_args(as, nr_args);
735 free_priority_group(pg, ti);
739 static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
743 struct dm_target *ti = m->ti;
745 static struct dm_arg _args[] = {
746 {0, 1024, "invalid number of hardware handler args"},
749 if (dm_read_arg_group(_args, as, &hw_argc, &ti->error))
755 m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
761 for (i = 0; i <= hw_argc - 2; i++)
762 len += strlen(as->argv[i]) + 1;
763 p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
765 ti->error = "memory allocation failed";
769 j = sprintf(p, "%d", hw_argc - 1);
770 for (i = 0, p+=j+1; i <= hw_argc - 2; i++, p+=j+1)
771 j = sprintf(p, "%s", as->argv[i]);
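/*
 * The buffer assembled above has the form "<argc>\0<arg1>\0<arg2>...":
 * each sprintf() NUL-terminates its piece and p is advanced past that NUL,
 * so the argument count and the handler arguments end up packed back to
 * back. parse_path() later hands this buffer to scsi_dh_set_params().
 */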
773 dm_consume_args(as, hw_argc - 1);
777 kfree(m->hw_handler_name);
778 m->hw_handler_name = NULL;
782 static int parse_features(struct dm_arg_set *as, struct multipath *m)
786 struct dm_target *ti = m->ti;
787 const char *arg_name;
789 static struct dm_arg _args[] = {
790 {0, 6, "invalid number of feature args"},
791 {1, 50, "pg_init_retries must be between 1 and 50"},
792 {0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
795 r = dm_read_arg_group(_args, as, &argc, &ti->error);
803 arg_name = dm_shift_arg(as);
806 if (!strcasecmp(arg_name, "queue_if_no_path")) {
807 r = queue_if_no_path(m, 1, 0);
811 if (!strcasecmp(arg_name, "retain_attached_hw_handler")) {
812 m->retain_attached_hw_handler = 1;
816 if (!strcasecmp(arg_name, "pg_init_retries") &&
818 r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error);
823 if (!strcasecmp(arg_name, "pg_init_delay_msecs") &&
825 r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error);
830 ti->error = "Unrecognised multipath feature request";
832 } while (argc && !r);
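/*
 * Example feature blocks accepted by the loop above (the leading count
 * covers every following word): "0", "1 queue_if_no_path",
 * "1 retain_attached_hw_handler" or
 * "4 pg_init_retries 3 pg_init_delay_msecs 2000".
 */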
837 static int multipath_ctr(struct dm_target *ti, unsigned int argc,
840 /* target arguments */
841 static struct dm_arg _args[] = {
842 {0, 1024, "invalid number of priority groups"},
843 {0, 1024, "invalid initial priority group number"},
848 struct dm_arg_set as;
849 unsigned pg_count = 0;
850 unsigned next_pg_num;
851 bool use_blk_mq = dm_use_blk_mq(dm_table_get_md(ti->table));
856 m = alloc_multipath(ti, use_blk_mq);
858 ti->error = "can't allocate multipath";
862 r = parse_features(&as, m);
866 r = parse_hw_handler(&as, m);
870 r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error);
874 r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error);
878 if ((!m->nr_priority_groups && next_pg_num) ||
879 (m->nr_priority_groups && !next_pg_num)) {
880 ti->error = "invalid initial priority group";
885 /* parse the priority groups */
887 struct priority_group *pg;
889 pg = parse_priority_group(&as, m);
895 m->nr_valid_paths += pg->nr_pgpaths;
896 list_add_tail(&pg->list, &m->priority_groups);
898 pg->pg_num = pg_count;
903 if (pg_count != m->nr_priority_groups) {
904 ti->error = "priority group count mismatch";
909 ti->num_flush_bios = 1;
910 ti->num_discard_bios = 1;
911 ti->num_write_same_bios = 1;
913 ti->per_io_data_size = sizeof(struct dm_mpath_io);
922 static void multipath_wait_for_pg_init_completion(struct multipath *m)
924 DECLARE_WAITQUEUE(wait, current);
927 add_wait_queue(&m->pg_init_wait, &wait);
930 set_current_state(TASK_UNINTERRUPTIBLE);
932 spin_lock_irqsave(&m->lock, flags);
933 if (!m->pg_init_in_progress) {
934 spin_unlock_irqrestore(&m->lock, flags);
937 spin_unlock_irqrestore(&m->lock, flags);
941 set_current_state(TASK_RUNNING);
943 remove_wait_queue(&m->pg_init_wait, &wait);
946 static void flush_multipath_work(struct multipath *m)
950 spin_lock_irqsave(&m->lock, flags);
951 m->pg_init_disabled = 1;
952 spin_unlock_irqrestore(&m->lock, flags);
954 flush_workqueue(kmpath_handlerd);
955 multipath_wait_for_pg_init_completion(m);
956 flush_workqueue(kmultipathd);
957 flush_work(&m->trigger_event);
959 spin_lock_irqsave(&m->lock, flags);
960 m->pg_init_disabled = 0;
961 spin_unlock_irqrestore(&m->lock, flags);
964 static void multipath_dtr(struct dm_target *ti)
966 struct multipath *m = ti->private;
968 flush_multipath_work(m);
973 * Take a path out of use.
975 static int fail_path(struct pgpath *pgpath)
978 struct multipath *m = pgpath->pg->m;
980 spin_lock_irqsave(&m->lock, flags);
982 if (!pgpath->is_active)
985 DMWARN("Failing path %s.", pgpath->path.dev->name);
987 pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
988 pgpath->is_active = 0;
989 pgpath->fail_count++;
993 if (pgpath == m->current_pgpath)
994 m->current_pgpath = NULL;
996 dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
997 pgpath->path.dev->name, m->nr_valid_paths);
999 schedule_work(&m->trigger_event);
1002 spin_unlock_irqrestore(&m->lock, flags);
1008 * Reinstate a previously-failed path
1010 static int reinstate_path(struct pgpath *pgpath)
1012 int r = 0, run_queue = 0;
1013 unsigned long flags;
1014 struct multipath *m = pgpath->pg->m;
1016 spin_lock_irqsave(&m->lock, flags);
1018 if (pgpath->is_active)
1021 if (!pgpath->pg->ps.type->reinstate_path) {
1022 DMWARN("Reinstate path not supported by path selector %s",
1023 pgpath->pg->ps.type->name);
1028 r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
1032 pgpath->is_active = 1;
1034 if (!m->nr_valid_paths++) {
1035 m->current_pgpath = NULL;
1037 } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
1038 if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
1039 m->pg_init_in_progress++;
1042 dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
1043 pgpath->path.dev->name, m->nr_valid_paths);
1045 schedule_work(&m->trigger_event);
1048 spin_unlock_irqrestore(&m->lock, flags);
1050 dm_table_run_md_queue_async(m->ti->table);
1056 * Fail or reinstate all paths that match the provided struct dm_dev.
1058 static int action_dev(struct multipath *m, struct dm_dev *dev,
1062 struct pgpath *pgpath;
1063 struct priority_group *pg;
1065 list_for_each_entry(pg, &m->priority_groups, list) {
1066 list_for_each_entry(pgpath, &pg->pgpaths, list) {
1067 if (pgpath->path.dev == dev)
1076 * Temporarily try to avoid having to use the specified PG
1078 static void bypass_pg(struct multipath *m, struct priority_group *pg,
1081 unsigned long flags;
1083 spin_lock_irqsave(&m->lock, flags);
1085 pg->bypassed = bypassed;
1086 m->current_pgpath = NULL;
1087 m->current_pg = NULL;
1089 spin_unlock_irqrestore(&m->lock, flags);
1091 schedule_work(&m->trigger_event);
1095 * Switch to using the specified PG from the next I/O that gets mapped
1097 static int switch_pg_num(struct multipath *m, const char *pgstr)
1099 struct priority_group *pg;
1101 unsigned long flags;
1104 if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
1105 (pgnum > m->nr_priority_groups)) {
1106 DMWARN("invalid PG number supplied to switch_pg_num");
1110 spin_lock_irqsave(&m->lock, flags);
1111 list_for_each_entry(pg, &m->priority_groups, list) {
1116 m->current_pgpath = NULL;
1117 m->current_pg = NULL;
1120 spin_unlock_irqrestore(&m->lock, flags);
1122 schedule_work(&m->trigger_event);
1127 * Set/clear bypassed status of a PG.
1128 * PGs are numbered upwards from 1 in the order they were declared.
1130 static int bypass_pg_num(struct multipath *m, const char *pgstr, int bypassed)
1132 struct priority_group *pg;
1136 if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
1137 (pgnum > m->nr_priority_groups)) {
1138 DMWARN("invalid PG number supplied to bypass_pg");
1142 list_for_each_entry(pg, &m->priority_groups, list) {
1147 bypass_pg(m, pg, bypassed);
1152 * Should we retry pg_init immediately?
1154 static int pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
1156 unsigned long flags;
1157 int limit_reached = 0;
1159 spin_lock_irqsave(&m->lock, flags);
1161 if (m->pg_init_count <= m->pg_init_retries && !m->pg_init_disabled)
1162 m->pg_init_required = 1;
1166 spin_unlock_irqrestore(&m->lock, flags);
1168 return limit_reached;
1171 static void pg_init_done(void *data, int errors)
1173 struct pgpath *pgpath = data;
1174 struct priority_group *pg = pgpath->pg;
1175 struct multipath *m = pg->m;
1176 unsigned long flags;
1177 unsigned delay_retry = 0;
1179 /* device or driver problems */
1184 if (!m->hw_handler_name) {
1188 DMERR("Could not failover the device: Handler scsi_dh_%s Error %d.",
1189 m->hw_handler_name, errors);
1191 * Fail the path for now, so we do not ping-pong between paths.
1195 case SCSI_DH_DEV_TEMP_BUSY:
1197 * Probably doing something like FW upgrade on the
1198 * controller so try the other pg.
1200 bypass_pg(m, pg, 1);
1203 /* Wait before retrying. */
1205 case SCSI_DH_IMM_RETRY:
1206 case SCSI_DH_RES_TEMP_UNAVAIL:
1207 if (pg_init_limit_reached(m, pgpath))
1213 * We probably do not want to fail the path for a device
1214 * error, but this is what the old dm did. In future
1215 * patches we can do more advanced handling.
1220 spin_lock_irqsave(&m->lock, flags);
1222 if (pgpath == m->current_pgpath) {
1223 DMERR("Could not failover device. Error %d.", errors);
1224 m->current_pgpath = NULL;
1225 m->current_pg = NULL;
1227 } else if (!m->pg_init_required)
1230 if (--m->pg_init_in_progress)
1231 /* Activations of other paths are still ongoing */
1234 if (m->pg_init_required) {
1235 m->pg_init_delay_retry = delay_retry;
1236 if (__pg_init_all_paths(m))
1242 * Wake up any thread waiting to suspend.
1244 wake_up(&m->pg_init_wait);
1247 spin_unlock_irqrestore(&m->lock, flags);
1250 static void activate_path(struct work_struct *work)
1252 struct pgpath *pgpath =
1253 container_of(work, struct pgpath, activate_path.work);
1255 if (pgpath->is_active)
1256 scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
1257 pg_init_done, pgpath);
1259 pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
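/*
 * activate_path() always runs from the kmpath_handlerd workqueue (queued
 * by __pg_init_all_paths() and reinstate_path()). pg_init_done() runs as
 * the scsi_dh_activate() completion callback (or directly for inactive
 * paths) and decides whether to retry the pg_init, bypass the group or
 * fail the path.
 */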
1262 static int noretry_error(int error)
1273 /* Anything else could be a path failure, so should be retried */
1280 static int do_end_io(struct multipath *m, struct request *clone,
1281 int error, struct dm_mpath_io *mpio)
1284 * We don't queue any clone request inside the multipath target
1285 * during end I/O handling, since those clone requests don't have
1286 * bio clones. If we queue them inside the multipath target,
1287 * we need to make bio clones, that requires memory allocation.
1288 * (See drivers/md/dm.c:end_clone_bio() about why the clone requests
1289 * don't have bio clones.)
1290 * Instead of queueing the clone request here, we queue the original
1291 * request into dm core, which will remake a clone request and
1292 * clone bios for it and resubmit it later.
1294 int r = DM_ENDIO_REQUEUE;
1295 unsigned long flags;
1297 if (!error && !clone->errors)
1298 return 0; /* I/O complete */
1300 if (noretry_error(error))
1304 fail_path(mpio->pgpath);
1306 spin_lock_irqsave(&m->lock, flags);
1307 if (!m->nr_valid_paths) {
1308 if (!m->queue_if_no_path) {
1309 if (!__must_push_back(m))
1312 if (error == -EBADE)
1316 spin_unlock_irqrestore(&m->lock, flags);
1321 static int multipath_end_io(struct dm_target *ti, struct request *clone,
1322 int error, union map_info *map_context)
1324 struct multipath *m = ti->private;
1325 struct dm_mpath_io *mpio = get_mpio(map_context);
1326 struct pgpath *pgpath;
1327 struct path_selector *ps;
1332 r = do_end_io(m, clone, error, mpio);
1333 pgpath = mpio->pgpath;
1335 ps = &pgpath->pg->ps;
1336 if (ps->type->end_io)
1337 ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
1339 clear_request_fn_mpio(m, map_context);
1345 * Suspend can't complete until all the I/O is processed so if
1346 * the last path fails we must error any remaining I/O.
1347 * Note that if the freeze_bdev fails while suspending, the
1348 * queue_if_no_path state is lost - userspace should reset it.
1350 static void multipath_presuspend(struct dm_target *ti)
1352 struct multipath *m = (struct multipath *) ti->private;
1354 queue_if_no_path(m, 0, 1);
1357 static void multipath_postsuspend(struct dm_target *ti)
1359 struct multipath *m = ti->private;
1361 mutex_lock(&m->work_mutex);
1362 flush_multipath_work(m);
1363 mutex_unlock(&m->work_mutex);
1367 * Restore the queue_if_no_path setting.
1369 static void multipath_resume(struct dm_target *ti)
1371 struct multipath *m = (struct multipath *) ti->private;
1372 unsigned long flags;
1374 spin_lock_irqsave(&m->lock, flags);
1375 m->queue_if_no_path = m->saved_queue_if_no_path;
1376 spin_unlock_irqrestore(&m->lock, flags);
1380 * Info output has the following format:
1381 * num_multipath_feature_args [multipath_feature_args]*
1382 * num_handler_status_args [handler_status_args]*
1383 * num_groups init_group_number
1384 * [A|D|E num_ps_status_args [ps_status_args]*
1385 * num_paths num_selector_args
1386 * [path_dev A|F fail_count [selector_args]* ]+ ]+
1388 * Table output has the following format (identical to the constructor string):
1389 * num_feature_args [features_args]*
1390 * num_handler_args hw_handler [hw_handler_args]*
1391 * num_groups init_group_number
1392 * [priority selector-name num_ps_args [ps_args]*
1393 * num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
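 * For example, an INFO line always begins
 * "2 <queue_io> <pg_init_count> 0 <num_groups> <init_group_number>" and is
 * then followed by one "A|D|E ..." block per priority group, as emitted
 * by multipath_status() below.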
1395 static void multipath_status(struct dm_target *ti, status_type_t type,
1396 unsigned status_flags, char *result, unsigned maxlen)
1399 unsigned long flags;
1400 struct multipath *m = (struct multipath *) ti->private;
1401 struct priority_group *pg;
1406 spin_lock_irqsave(&m->lock, flags);
1409 if (type == STATUSTYPE_INFO)
1410 DMEMIT("2 %u %u ", m->queue_io, m->pg_init_count);
1412 DMEMIT("%u ", m->queue_if_no_path +
1413 (m->pg_init_retries > 0) * 2 +
1414 (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 +
1415 m->retain_attached_hw_handler);
1416 if (m->queue_if_no_path)
1417 DMEMIT("queue_if_no_path ");
1418 if (m->pg_init_retries)
1419 DMEMIT("pg_init_retries %u ", m->pg_init_retries);
1420 if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
1421 DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
1422 if (m->retain_attached_hw_handler)
1423 DMEMIT("retain_attached_hw_handler ");
1426 if (!m->hw_handler_name || type == STATUSTYPE_INFO)
1429 DMEMIT("1 %s ", m->hw_handler_name);
1431 DMEMIT("%u ", m->nr_priority_groups);
1434 pg_num = m->next_pg->pg_num;
1435 else if (m->current_pg)
1436 pg_num = m->current_pg->pg_num;
1438 pg_num = (m->nr_priority_groups ? 1 : 0);
1440 DMEMIT("%u ", pg_num);
1443 case STATUSTYPE_INFO:
1444 list_for_each_entry(pg, &m->priority_groups, list) {
1446 state = 'D'; /* Disabled */
1447 else if (pg == m->current_pg)
1448 state = 'A'; /* Currently Active */
1450 state = 'E'; /* Enabled */
1452 DMEMIT("%c ", state);
1454 if (pg->ps.type->status)
1455 sz += pg->ps.type->status(&pg->ps, NULL, type,
1461 DMEMIT("%u %u ", pg->nr_pgpaths,
1462 pg->ps.type->info_args);
1464 list_for_each_entry(p, &pg->pgpaths, list) {
1465 DMEMIT("%s %s %u ", p->path.dev->name,
1466 p->is_active ? "A" : "F",
1468 if (pg->ps.type->status)
1469 sz += pg->ps.type->status(&pg->ps,
1470 &p->path, type, result + sz,
1476 case STATUSTYPE_TABLE:
1477 list_for_each_entry(pg, &m->priority_groups, list) {
1478 DMEMIT("%s ", pg->ps.type->name);
1480 if (pg->ps.type->status)
1481 sz += pg->ps.type->status(&pg->ps, NULL, type,
1487 DMEMIT("%u %u ", pg->nr_pgpaths,
1488 pg->ps.type->table_args);
1490 list_for_each_entry(p, &pg->pgpaths, list) {
1491 DMEMIT("%s ", p->path.dev->name);
1492 if (pg->ps.type->status)
1493 sz += pg->ps.type->status(&pg->ps,
1494 &p->path, type, result + sz,
1501 spin_unlock_irqrestore(&m->lock, flags);
1504 static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
1508 struct multipath *m = (struct multipath *) ti->private;
1511 mutex_lock(&m->work_mutex);
1513 if (dm_suspended(ti)) {
1519 if (!strcasecmp(argv[0], "queue_if_no_path")) {
1520 r = queue_if_no_path(m, 1, 0);
1522 } else if (!strcasecmp(argv[0], "fail_if_no_path")) {
1523 r = queue_if_no_path(m, 0, 0);
1529 DMWARN("Invalid multipath message arguments. Expected 2 arguments, got %d.", argc);
1533 if (!strcasecmp(argv[0], "disable_group")) {
1534 r = bypass_pg_num(m, argv[1], 1);
1536 } else if (!strcasecmp(argv[0], "enable_group")) {
1537 r = bypass_pg_num(m, argv[1], 0);
1539 } else if (!strcasecmp(argv[0], "switch_group")) {
1540 r = switch_pg_num(m, argv[1]);
1542 } else if (!strcasecmp(argv[0], "reinstate_path"))
1543 action = reinstate_path;
1544 else if (!strcasecmp(argv[0], "fail_path"))
1547 DMWARN("Unrecognised multipath message received: %s", argv[0]);
1551 r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev);
1553 DMWARN("message: error getting device %s",
1558 r = action_dev(m, dev, action);
1560 dm_put_device(ti, dev);
1563 mutex_unlock(&m->work_mutex);
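/*
 * These messages are normally issued with dmsetup, e.g. (the map name and
 * path device below are illustrative):
 *
 *   dmsetup message mpatha 0 "fail_path 8:32"
 *   dmsetup message mpatha 0 "reinstate_path 8:32"
 *   dmsetup message mpatha 0 "switch_group 2"
 *   dmsetup message mpatha 0 "queue_if_no_path"
 */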
1567 static int multipath_prepare_ioctl(struct dm_target *ti,
1568 struct block_device **bdev, fmode_t *mode)
1570 struct multipath *m = ti->private;
1571 unsigned long flags;
1574 spin_lock_irqsave(&m->lock, flags);
1576 if (!m->current_pgpath)
1577 __choose_pgpath(m, 0);
1579 if (m->current_pgpath) {
1581 *bdev = m->current_pgpath->path.dev->bdev;
1582 *mode = m->current_pgpath->path.dev->mode;
1585 /* pg_init has not started or completed */
1589 /* No path is available */
1590 if (m->queue_if_no_path)
1596 spin_unlock_irqrestore(&m->lock, flags);
1598 if (r == -ENOTCONN) {
1599 spin_lock_irqsave(&m->lock, flags);
1600 if (!m->current_pg) {
1601 /* Path status changed, redo selection */
1602 __choose_pgpath(m, 0);
1604 if (m->pg_init_required)
1605 __pg_init_all_paths(m);
1606 spin_unlock_irqrestore(&m->lock, flags);
1607 dm_table_run_md_queue_async(m->ti->table);
1611 * Only pass ioctls through if the device sizes match exactly.
1613 if (!r && ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
1618 static int multipath_iterate_devices(struct dm_target *ti,
1619 iterate_devices_callout_fn fn, void *data)
1621 struct multipath *m = ti->private;
1622 struct priority_group *pg;
1626 list_for_each_entry(pg, &m->priority_groups, list) {
1627 list_for_each_entry(p, &pg->pgpaths, list) {
1628 ret = fn(ti, p->path.dev, ti->begin, ti->len, data);
1638 static int __pgpath_busy(struct pgpath *pgpath)
1640 struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
1642 return blk_lld_busy(q);
1646 * We return "busy" only when we can map I/Os but the underlying devices
1647 * are busy (so even if we mapped the I/Os now, they would just wait on
1648 * the underlying queue).
1649 * In other words, if we want to kill I/Os or queue them inside us
1650 * due to map unavailability, we don't return "busy"; otherwise
1651 * dm core won't give us the I/Os and we can't do what we want.
1653 static int multipath_busy(struct dm_target *ti)
1655 int busy = 0, has_active = 0;
1656 struct multipath *m = ti->private;
1657 struct priority_group *pg;
1658 struct pgpath *pgpath;
1659 unsigned long flags;
1661 spin_lock_irqsave(&m->lock, flags);
1663 /* pg_init in progress or no paths available */
1664 if (m->pg_init_in_progress ||
1665 (!m->nr_valid_paths && m->queue_if_no_path)) {
1669 /* Guess which priority_group will be used at next mapping time */
1670 if (unlikely(!m->current_pgpath && m->next_pg))
1672 else if (likely(m->current_pg))
1676 * We don't know which pg will be used at the next mapping time.
1677 * We don't call __choose_pgpath() here to avoid triggering
1678 * pg_init just because of a busy check.
1679 * So we don't know whether the underlying devices we will be using
1680 * at the next mapping time are busy or not. Just try mapping.
1685 * If there is at least one non-busy active path, the path selector
1686 * will be able to select it, so we consider such a pg as not busy.
1689 list_for_each_entry(pgpath, &pg->pgpaths, list)
1690 if (pgpath->is_active) {
1693 if (!__pgpath_busy(pgpath)) {
1701 * No active path in this pg, so this pg won't be used and
1702 * the current_pg will be changed at next mapping time.
1703 * We need to try mapping to determine it.
1708 spin_unlock_irqrestore(&m->lock, flags);
1713 /*-----------------------------------------------------------------
1715 *---------------------------------------------------------------*/
1716 static struct target_type multipath_target = {
1717 .name = "multipath",
1718 .version = {1, 11, 0},
1719 .features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE,
1720 .module = THIS_MODULE,
1721 .ctr = multipath_ctr,
1722 .dtr = multipath_dtr,
1723 .map_rq = multipath_map,
1724 .clone_and_map_rq = multipath_clone_and_map,
1725 .release_clone_rq = multipath_release_clone,
1726 .rq_end_io = multipath_end_io,
1727 .presuspend = multipath_presuspend,
1728 .postsuspend = multipath_postsuspend,
1729 .resume = multipath_resume,
1730 .status = multipath_status,
1731 .message = multipath_message,
1732 .prepare_ioctl = multipath_prepare_ioctl,
1733 .iterate_devices = multipath_iterate_devices,
1734 .busy = multipath_busy,
1737 static int __init dm_multipath_init(void)
1741 /* allocate a slab for the dm_ios */
1742 _mpio_cache = KMEM_CACHE(dm_mpath_io, 0);
1746 r = dm_register_target(&multipath_target);
1748 DMERR("register failed %d", r);
1750 goto bad_register_target;
1753 kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
1755 DMERR("failed to create workqueue kmpathd");
1757 goto bad_alloc_kmultipathd;
1761 * A separate workqueue is used to handle the device handlers
1762 * to avoid overloading the existing workqueue; overloading it
1763 * would also create a bottleneck in the path of the storage
1764 * hardware device activation.
1766 kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
1768 if (!kmpath_handlerd) {
1769 DMERR("failed to create workqueue kmpath_handlerd");
1771 goto bad_alloc_kmpath_handlerd;
1774 DMINFO("version %u.%u.%u loaded",
1775 multipath_target.version[0], multipath_target.version[1],
1776 multipath_target.version[2]);
1780 bad_alloc_kmpath_handlerd:
1781 destroy_workqueue(kmultipathd);
1782 bad_alloc_kmultipathd:
1783 dm_unregister_target(&multipath_target);
1784 bad_register_target:
1785 kmem_cache_destroy(_mpio_cache);
1790 static void __exit dm_multipath_exit(void)
1792 destroy_workqueue(kmpath_handlerd);
1793 destroy_workqueue(kmultipathd);
1795 dm_unregister_target(&multipath_target);
1796 kmem_cache_destroy(_mpio_cache);
1799 module_init(dm_multipath_init);
1800 module_exit(dm_multipath_exit);
1802 MODULE_DESCRIPTION(DM_NAME " multipath target");
1803 MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
1804 MODULE_LICENSE("GPL");