drivers/md/dm-mpath.c
1 /*
2  * Copyright (C) 2003 Sistina Software Limited.
3  * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
4  *
5  * This file is released under the GPL.
6  */
7
8 #include <linux/device-mapper.h>
9
10 #include "dm.h"
11 #include "dm-path-selector.h"
12 #include "dm-uevent.h"
13
14 #include <linux/blkdev.h>
15 #include <linux/ctype.h>
16 #include <linux/init.h>
17 #include <linux/mempool.h>
18 #include <linux/module.h>
19 #include <linux/pagemap.h>
20 #include <linux/slab.h>
21 #include <linux/time.h>
22 #include <linux/workqueue.h>
23 #include <linux/delay.h>
24 #include <scsi/scsi_dh.h>
25 #include <linux/atomic.h>
26
27 #define DM_MSG_PREFIX "multipath"
28 #define DM_PG_INIT_DELAY_MSECS 2000 /* default delay before a pg_init retry */
29 #define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1) /* pg_init_delay_msecs not set by user */
30
31 /* Path properties */
32 struct pgpath {
33         struct list_head list;
34
35         struct priority_group *pg;      /* Owning PG */
36         unsigned is_active;             /* Path status */
37         unsigned fail_count;            /* Cumulative failure count */
38
39         struct dm_path path;
40         struct delayed_work activate_path;
41 };
42
43 #define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
44
45 /*
46  * Paths are grouped into Priority Groups, which are numbered from 1 upwards.
47  * Each PG has a path selector which controls which of its paths gets used.
48  */
49 struct priority_group {
50         struct list_head list;
51
52         struct multipath *m;            /* Owning multipath instance */
53         struct path_selector ps;
54
55         unsigned pg_num;                /* Reference number */
56         unsigned bypassed;              /* Temporarily bypass this PG? */
57
58         unsigned nr_pgpaths;            /* Number of paths in PG */
59         struct list_head pgpaths;
60 };
61
62 /* Multipath context */
63 struct multipath {
64         struct list_head list;
65         struct dm_target *ti;
66
67         const char *hw_handler_name;
68         char *hw_handler_params;
69
70         spinlock_t lock;
71
72         unsigned nr_priority_groups;
73         struct list_head priority_groups;
74
75         wait_queue_head_t pg_init_wait; /* Wait for pg_init completion */
76
77         unsigned pg_init_required;      /* pg_init needs calling? */
78         unsigned pg_init_in_progress;   /* Only one pg_init allowed at once */
79         unsigned pg_init_delay_retry;   /* Delay pg_init retry? */
80
81         unsigned nr_valid_paths;        /* Total number of usable paths */
82         struct pgpath *current_pgpath;
83         struct priority_group *current_pg;
84         struct priority_group *next_pg; /* Switch to this PG if set */
85         unsigned repeat_count;          /* I/Os left before calling PS again */
86
87         unsigned queue_io:1;            /* Must we queue all I/O? */
88         unsigned queue_if_no_path:1;    /* Queue I/O if last path fails? */
89         unsigned saved_queue_if_no_path:1; /* Saved state during suspension */
90         unsigned retain_attached_hw_handler:1; /* If there's already a hw_handler present, don't change it. */
91         unsigned pg_init_disabled:1;    /* pg_init is not currently allowed */
92
93         unsigned pg_init_retries;       /* Number of times to retry pg_init */
94         unsigned pg_init_count;         /* Number of times pg_init called */
95         unsigned pg_init_delay_msecs;   /* Number of msecs before pg_init retry */
96
97         struct work_struct trigger_event;
98
99         /*
100          * We must use a mempool of dm_mpath_io structs so that we
101          * can resubmit I/O on error.
102          */
103         mempool_t *mpio_pool;
104
105         struct mutex work_mutex;
106 };
107
108 /*
109  * Context information attached to each bio we process.
110  */
111 struct dm_mpath_io {
112         struct pgpath *pgpath;
113         size_t nr_bytes;
114 };
115
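/* An action (fail_path() or reinstate_path()) applied per matching path by action_dev() */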
116 typedef int (*action_fn) (struct pgpath *pgpath);
117
118 static struct kmem_cache *_mpio_cache;
119
120 static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
121 static void trigger_event(struct work_struct *work);
122 static void activate_path(struct work_struct *work);
123 static int __pgpath_busy(struct pgpath *pgpath);
124
125
126 /*-----------------------------------------------
127  * Allocation routines
128  *-----------------------------------------------*/
129
130 static struct pgpath *alloc_pgpath(void)
131 {
132         struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);
133
134         if (pgpath) {
135                 pgpath->is_active = 1;
136                 INIT_DELAYED_WORK(&pgpath->activate_path, activate_path);
137         }
138
139         return pgpath;
140 }
141
142 static void free_pgpath(struct pgpath *pgpath)
143 {
144         kfree(pgpath);
145 }
146
147 static struct priority_group *alloc_priority_group(void)
148 {
149         struct priority_group *pg;
150
151         pg = kzalloc(sizeof(*pg), GFP_KERNEL);
152
153         if (pg)
154                 INIT_LIST_HEAD(&pg->pgpaths);
155
156         return pg;
157 }
158
159 static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
160 {
161         struct pgpath *pgpath, *tmp;
162
163         list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
164                 list_del(&pgpath->list);
165                 dm_put_device(ti, pgpath->path.dev);
166                 free_pgpath(pgpath);
167         }
168 }
169
170 static void free_priority_group(struct priority_group *pg,
171                                 struct dm_target *ti)
172 {
173         struct path_selector *ps = &pg->ps;
174
175         if (ps->type) {
176                 ps->type->destroy(ps);
177                 dm_put_path_selector(ps->type);
178         }
179
180         free_pgpaths(&pg->pgpaths, ti);
181         kfree(pg);
182 }
183
184 static struct multipath *alloc_multipath(struct dm_target *ti, bool use_blk_mq)
185 {
186         struct multipath *m;
187
188         m = kzalloc(sizeof(*m), GFP_KERNEL);
189         if (m) {
190                 INIT_LIST_HEAD(&m->priority_groups);
191                 spin_lock_init(&m->lock);
192                 m->queue_io = 1;
193                 m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
194                 INIT_WORK(&m->trigger_event, trigger_event);
195                 init_waitqueue_head(&m->pg_init_wait);
196                 mutex_init(&m->work_mutex);
197
198                 m->mpio_pool = NULL;
199                 if (!use_blk_mq) {
200                         unsigned min_ios = dm_get_reserved_rq_based_ios();
201
202                         m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache);
203                         if (!m->mpio_pool) {
204                                 kfree(m);
205                                 return NULL;
206                         }
207                 }
208
209                 m->ti = ti;
210                 ti->private = m;
211         }
212
213         return m;
214 }
215
216 static void free_multipath(struct multipath *m)
217 {
218         struct priority_group *pg, *tmp;
219
220         list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
221                 list_del(&pg->list);
222                 free_priority_group(pg, m->ti);
223         }
224
225         kfree(m->hw_handler_name);
226         kfree(m->hw_handler_params);
227         mempool_destroy(m->mpio_pool);
228         kfree(m);
229 }
230
231 static struct dm_mpath_io *get_mpio(union map_info *info)
232 {
233         return info->ptr;
234 }
235
236 static struct dm_mpath_io *set_mpio(struct multipath *m, union map_info *info)
237 {
238         struct dm_mpath_io *mpio;
239
240         if (!m->mpio_pool) {
241                 /* Use blk-mq pdu memory requested via per_io_data_size */
242                 mpio = get_mpio(info);
243                 memset(mpio, 0, sizeof(*mpio));
244                 return mpio;
245         }
246
247         mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC);
248         if (!mpio)
249                 return NULL;
250
251         memset(mpio, 0, sizeof(*mpio));
252         info->ptr = mpio;
253
254         return mpio;
255 }
256
257 static void clear_request_fn_mpio(struct multipath *m, union map_info *info)
258 {
259         /* Only needed for non blk-mq (.request_fn) multipath */
260         if (m->mpio_pool) {
261                 struct dm_mpath_io *mpio = info->ptr;
262
263                 info->ptr = NULL;
264                 mempool_free(mpio, m->mpio_pool);
265         }
266 }
267
268 /*-----------------------------------------------
269  * Path selection
270  *-----------------------------------------------*/
271
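/*
 * Start pg_init (hardware handler activation) on every active path in
 * the current priority group.  Returns the number of activations queued,
 * or 0 if pg_init is already in progress, has been disabled, or there is
 * no current PG.  Called with m->lock held.
 */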
272 static int __pg_init_all_paths(struct multipath *m)
273 {
274         struct pgpath *pgpath;
275         unsigned long pg_init_delay = 0;
276
277         if (m->pg_init_in_progress || m->pg_init_disabled)
278                 return 0;
279
280         m->pg_init_count++;
281         m->pg_init_required = 0;
282
283         /* Checked here so that pg_init_required is reset even if there is no current PG */
284         if (!m->current_pg)
285                 return 0;
286
287         if (m->pg_init_delay_retry)
288                 pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
289                                                  m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
290         list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
291                 /* Skip failed paths */
292                 if (!pgpath->is_active)
293                         continue;
294                 if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
295                                        pg_init_delay))
296                         m->pg_init_in_progress++;
297         }
298         return m->pg_init_in_progress;
299 }
300
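/*
 * Make the priority group owning @pgpath the current PG.  If a hardware
 * handler is configured, the PG must be initialised first, so flag
 * pg_init_required and queue I/O until initialisation completes.
 * Called with m->lock held.
 */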
301 static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
302 {
303         m->current_pg = pgpath->pg;
304
305         /* Must we initialise the PG first, and queue I/O till it's ready? */
306         if (m->hw_handler_name) {
307                 m->pg_init_required = 1;
308                 m->queue_io = 1;
309         } else {
310                 m->pg_init_required = 0;
311                 m->queue_io = 0;
312         }
313
314         m->pg_init_count = 0;
315 }
316
317 static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg,
318                                size_t nr_bytes)
319 {
320         struct dm_path *path;
321
322         path = pg->ps.type->select_path(&pg->ps, &m->repeat_count, nr_bytes);
323         if (!path)
324                 return -ENXIO;
325
326         m->current_pgpath = path_to_pgpath(path);
327
328         if (m->current_pg != pg)
329                 __switch_pg(m, m->current_pgpath);
330
331         return 0;
332 }
333
334 static void __choose_pgpath(struct multipath *m, size_t nr_bytes)
335 {
336         struct priority_group *pg;
337         unsigned bypassed = 1;
338
339         if (!m->nr_valid_paths) {
340                 m->queue_io = 0;
341                 goto failed;
342         }
343
344         /* Were we instructed to switch PG? */
345         if (m->next_pg) {
346                 pg = m->next_pg;
347                 m->next_pg = NULL;
348                 if (!__choose_path_in_pg(m, pg, nr_bytes))
349                         return;
350         }
351
352         /* Don't change PG until it has no remaining paths */
353         if (m->current_pg && !__choose_path_in_pg(m, m->current_pg, nr_bytes))
354                 return;
355
356         /*
357          * Loop through priority groups until we find a valid path.
358          * First time we skip PGs marked 'bypassed'.
359          * Second time we only try the ones we skipped, but set
360          * pg_init_delay_retry so we do not hammer controllers.
361          */
362         do {
363                 list_for_each_entry(pg, &m->priority_groups, list) {
364                         if (pg->bypassed == bypassed)
365                                 continue;
366                         if (!__choose_path_in_pg(m, pg, nr_bytes)) {
367                                 if (!bypassed)
368                                         m->pg_init_delay_retry = 1;
369                                 return;
370                         }
371                 }
372         } while (bypassed--);
373
374 failed:
375         m->current_pgpath = NULL;
376         m->current_pg = NULL;
377 }
378
379 /*
380  * Check whether bios must be queued in the device-mapper core rather
381  * than here in the target.
382  *
383  * m->lock must be held on entry.
384  *
385  * If m->queue_if_no_path and m->saved_queue_if_no_path hold the
386  * same value then we are not between multipath_presuspend()
387  * and multipath_resume() calls and we have no need to check
388  * for the DMF_NOFLUSH_SUSPENDING flag.
389  */
390 static int __must_push_back(struct multipath *m)
391 {
392         return (m->queue_if_no_path ||
393                 (m->queue_if_no_path != m->saved_queue_if_no_path &&
394                  dm_noflush_suspending(m->ti)));
395 }
396
397 /*
398  * Map cloned requests
399  */
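/*
 * Returns DM_MAPIO_REMAPPED once the clone has been bound to a path,
 * DM_MAPIO_REQUEUE when dm core should retry later (mpio allocation
 * failed, or pg_init is needed/in progress and I/O is being queued),
 * or -EIO when no valid path exists and I/O is not being pushed back.
 */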
400 static int __multipath_map(struct dm_target *ti, struct request *clone,
401                            union map_info *map_context,
402                            struct request *rq, struct request **__clone)
403 {
404         struct multipath *m = (struct multipath *) ti->private;
405         int r = DM_MAPIO_REQUEUE;
406         size_t nr_bytes = clone ? blk_rq_bytes(clone) : blk_rq_bytes(rq);
407         struct pgpath *pgpath;
408         struct block_device *bdev;
409         struct dm_mpath_io *mpio;
410
411         spin_lock_irq(&m->lock);
412
413         /* Do we need to select a new pgpath? */
414         if (!m->current_pgpath ||
415             (!m->queue_io && (m->repeat_count && --m->repeat_count == 0)))
416                 __choose_pgpath(m, nr_bytes);
417
418         pgpath = m->current_pgpath;
419
420         if (!pgpath) {
421                 if (!__must_push_back(m))
422                         r = -EIO;       /* Failed */
423                 goto out_unlock;
424         } else if (m->queue_io || m->pg_init_required) {
425                 __pg_init_all_paths(m);
426                 goto out_unlock;
427         }
428
429         mpio = set_mpio(m, map_context);
430         if (!mpio)
431                 /* ENOMEM, requeue */
432                 goto out_unlock;
433
434         mpio->pgpath = pgpath;
435         mpio->nr_bytes = nr_bytes;
436
437         bdev = pgpath->path.dev->bdev;
438
439         spin_unlock_irq(&m->lock);
440
441         if (clone) {
442                 /*
443                  * Old request-based interface: allocated clone is passed in.
444                  * Used by: .request_fn stacked on .request_fn path(s).
445                  */
446                 clone->q = bdev_get_queue(bdev);
447                 clone->rq_disk = bdev->bd_disk;
448                 clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
449         } else {
450                 /*
451                  * blk-mq request-based interface; used by both:
452                  * .request_fn stacked on blk-mq path(s) and
453                  * blk-mq stacked on blk-mq path(s).
454                  */
455                 *__clone = blk_get_request(bdev_get_queue(bdev),
456                                            rq_data_dir(rq), GFP_ATOMIC);
457                 if (IS_ERR(*__clone)) {
458                         /* ENOMEM, requeue */
459                         clear_request_fn_mpio(m, map_context);
460                         return r;
461                 }
462                 (*__clone)->bio = (*__clone)->biotail = NULL;
463                 (*__clone)->rq_disk = bdev->bd_disk;
464                 (*__clone)->cmd_flags |= REQ_FAILFAST_TRANSPORT;
465         }
466
467         if (pgpath->pg->ps.type->start_io)
468                 pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
469                                               &pgpath->path,
470                                               nr_bytes);
471         return DM_MAPIO_REMAPPED;
472
473 out_unlock:
474         spin_unlock_irq(&m->lock);
475
476         return r;
477 }
478
479 static int multipath_map(struct dm_target *ti, struct request *clone,
480                          union map_info *map_context)
481 {
482         return __multipath_map(ti, clone, map_context, NULL, NULL);
483 }
484
485 static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
486                                    union map_info *map_context,
487                                    struct request **clone)
488 {
489         return __multipath_map(ti, NULL, map_context, rq, clone);
490 }
491
492 static void multipath_release_clone(struct request *clone)
493 {
494         blk_put_request(clone);
495 }
496
497 /*
498  * If we run out of usable paths, should we queue I/O or error it?
499  */
500 static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path,
501                             unsigned save_old_value)
502 {
503         unsigned long flags;
504
505         spin_lock_irqsave(&m->lock, flags);
506
507         if (save_old_value)
508                 m->saved_queue_if_no_path = m->queue_if_no_path;
509         else
510                 m->saved_queue_if_no_path = queue_if_no_path;
511         m->queue_if_no_path = queue_if_no_path;
512         spin_unlock_irqrestore(&m->lock, flags);
513
514         if (!queue_if_no_path)
515                 dm_table_run_md_queue_async(m->ti->table);
516
517         return 0;
518 }
519
520 /*
521  * An event is triggered whenever a path is taken out of use.
522  * Includes path failure and PG bypass.
523  */
524 static void trigger_event(struct work_struct *work)
525 {
526         struct multipath *m =
527                 container_of(work, struct multipath, trigger_event);
528
529         dm_table_event(m->ti->table);
530 }
531
532 /*-----------------------------------------------------------------
533  * Constructor/argument parsing:
534  * <#multipath feature args> [<arg>]*
535  * <#hw_handler args> [hw_handler [<arg>]*]
536  * <#priority groups>
537  * <initial priority group>
538  *     [<selector> <#selector args> [<arg>]*
539  *      <#paths> <#per-path selector args>
540  *         [<path> [<arg>]* ]+ ]+
541  *---------------------------------------------------------------*/
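/*
 * Purely illustrative example of a multipath table line in the above
 * format (the sector count, device numbers and repeat counts are made up):
 *
 *   0 2097152 multipath 1 queue_if_no_path 0 1 1 \
 *       round-robin 0 2 1 8:16 100 8:32 100
 *
 * i.e. one feature arg, no hardware handler, one priority group
 * (initially selected) using the round-robin selector with two paths,
 * each carrying a single per-path selector arg (the repeat count).
 */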
542 static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
543                                struct dm_target *ti)
544 {
545         int r;
546         struct path_selector_type *pst;
547         unsigned ps_argc;
548
549         static struct dm_arg _args[] = {
550                 {0, 1024, "invalid number of path selector args"},
551         };
552
553         pst = dm_get_path_selector(dm_shift_arg(as));
554         if (!pst) {
555                 ti->error = "unknown path selector type";
556                 return -EINVAL;
557         }
558
559         r = dm_read_arg_group(_args, as, &ps_argc, &ti->error);
560         if (r) {
561                 dm_put_path_selector(pst);
562                 return -EINVAL;
563         }
564
565         r = pst->create(&pg->ps, ps_argc, as->argv);
566         if (r) {
567                 dm_put_path_selector(pst);
568                 ti->error = "path selector constructor failed";
569                 return r;
570         }
571
572         pg->ps.type = pst;
573         dm_consume_args(as, ps_argc);
574
575         return 0;
576 }
577
578 static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
579                                struct dm_target *ti)
580 {
581         int r;
582         struct pgpath *p;
583         struct multipath *m = ti->private;
584         struct request_queue *q = NULL;
585         const char *attached_handler_name;
586
587         /* we need at least a path arg */
588         if (as->argc < 1) {
589                 ti->error = "no device given";
590                 return ERR_PTR(-EINVAL);
591         }
592
593         p = alloc_pgpath();
594         if (!p)
595                 return ERR_PTR(-ENOMEM);
596
597         r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
598                           &p->path.dev);
599         if (r) {
600                 ti->error = "error getting device";
601                 goto bad;
602         }
603
604         if (m->retain_attached_hw_handler || m->hw_handler_name)
605                 q = bdev_get_queue(p->path.dev->bdev);
606
607         if (m->retain_attached_hw_handler) {
608 retain:
609                 attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
610                 if (attached_handler_name) {
611                         /*
612                          * Reset hw_handler_name to match the attached handler
613                          * and clear any hw_handler_params associated with the
614                          * ignored handler.
615                          *
616                          * NB. This modifies the table line to show the actual
617                          * handler instead of the original table passed in.
618                          */
619                         kfree(m->hw_handler_name);
620                         m->hw_handler_name = attached_handler_name;
621
622                         kfree(m->hw_handler_params);
623                         m->hw_handler_params = NULL;
624                 }
625         }
626
627         if (m->hw_handler_name) {
628                 r = scsi_dh_attach(q, m->hw_handler_name);
629                 if (r == -EBUSY) {
630                         char b[BDEVNAME_SIZE];
631
632                         printk(KERN_INFO "dm-mpath: retaining handler on device %s\n",
633                                 bdevname(p->path.dev->bdev, b));
634                         goto retain;
635                 }
636                 if (r < 0) {
637                         ti->error = "error attaching hardware handler";
638                         dm_put_device(ti, p->path.dev);
639                         goto bad;
640                 }
641
642                 if (m->hw_handler_params) {
643                         r = scsi_dh_set_params(q, m->hw_handler_params);
644                         if (r < 0) {
645                                 ti->error = "unable to set hardware "
646                                                         "handler parameters";
647                                 dm_put_device(ti, p->path.dev);
648                                 goto bad;
649                         }
650                 }
651         }
652
653         r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
654         if (r) {
655                 dm_put_device(ti, p->path.dev);
656                 goto bad;
657         }
658
659         return p;
660
661  bad:
662         free_pgpath(p);
663         return ERR_PTR(r);
664 }
665
666 static struct priority_group *parse_priority_group(struct dm_arg_set *as,
667                                                    struct multipath *m)
668 {
669         static struct dm_arg _args[] = {
670                 {1, 1024, "invalid number of paths"},
671                 {0, 1024, "invalid number of selector args"}
672         };
673
674         int r;
675         unsigned i, nr_selector_args, nr_args;
676         struct priority_group *pg;
677         struct dm_target *ti = m->ti;
678
679         if (as->argc < 2) {
680                 as->argc = 0;
681                 ti->error = "not enough priority group arguments";
682                 return ERR_PTR(-EINVAL);
683         }
684
685         pg = alloc_priority_group();
686         if (!pg) {
687                 ti->error = "couldn't allocate priority group";
688                 return ERR_PTR(-ENOMEM);
689         }
690         pg->m = m;
691
692         r = parse_path_selector(as, pg, ti);
693         if (r)
694                 goto bad;
695
696         /*
697          * read the paths
698          */
699         r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error);
700         if (r)
701                 goto bad;
702
703         r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error);
704         if (r)
705                 goto bad;
706
707         nr_args = 1 + nr_selector_args;
708         for (i = 0; i < pg->nr_pgpaths; i++) {
709                 struct pgpath *pgpath;
710                 struct dm_arg_set path_args;
711
712                 if (as->argc < nr_args) {
713                         ti->error = "not enough path parameters";
714                         r = -EINVAL;
715                         goto bad;
716                 }
717
718                 path_args.argc = nr_args;
719                 path_args.argv = as->argv;
720
721                 pgpath = parse_path(&path_args, &pg->ps, ti);
722                 if (IS_ERR(pgpath)) {
723                         r = PTR_ERR(pgpath);
724                         goto bad;
725                 }
726
727                 pgpath->pg = pg;
728                 list_add_tail(&pgpath->list, &pg->pgpaths);
729                 dm_consume_args(as, nr_args);
730         }
731
732         return pg;
733
734  bad:
735         free_priority_group(pg, ti);
736         return ERR_PTR(r);
737 }
738
739 static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
740 {
741         unsigned hw_argc;
742         int ret;
743         struct dm_target *ti = m->ti;
744
745         static struct dm_arg _args[] = {
746                 {0, 1024, "invalid number of hardware handler args"},
747         };
748
749         if (dm_read_arg_group(_args, as, &hw_argc, &ti->error))
750                 return -EINVAL;
751
752         if (!hw_argc)
753                 return 0;
754
755         m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
756
757         if (hw_argc > 1) {
758                 char *p;
759                 int i, j, len = 4;
760
761                 for (i = 0; i <= hw_argc - 2; i++)
762                         len += strlen(as->argv[i]) + 1;
763                 p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
764                 if (!p) {
765                         ti->error = "memory allocation failed";
766                         ret = -ENOMEM;
767                         goto fail;
768                 }
769                 j = sprintf(p, "%d", hw_argc - 1);
770                 for (i = 0, p+=j+1; i <= hw_argc - 2; i++, p+=j+1)
771                         j = sprintf(p, "%s", as->argv[i]);
772         }
773         dm_consume_args(as, hw_argc - 1);
774
775         return 0;
776 fail:
777         kfree(m->hw_handler_name);
778         m->hw_handler_name = NULL;
779         return ret;
780 }
781
782 static int parse_features(struct dm_arg_set *as, struct multipath *m)
783 {
784         int r;
785         unsigned argc;
786         struct dm_target *ti = m->ti;
787         const char *arg_name;
788
789         static struct dm_arg _args[] = {
790                 {0, 6, "invalid number of feature args"},
791                 {1, 50, "pg_init_retries must be between 1 and 50"},
792                 {0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
793         };
794
795         r = dm_read_arg_group(_args, as, &argc, &ti->error);
796         if (r)
797                 return -EINVAL;
798
799         if (!argc)
800                 return 0;
801
802         do {
803                 arg_name = dm_shift_arg(as);
804                 argc--;
805
806                 if (!strcasecmp(arg_name, "queue_if_no_path")) {
807                         r = queue_if_no_path(m, 1, 0);
808                         continue;
809                 }
810
811                 if (!strcasecmp(arg_name, "retain_attached_hw_handler")) {
812                         m->retain_attached_hw_handler = 1;
813                         continue;
814                 }
815
816                 if (!strcasecmp(arg_name, "pg_init_retries") &&
817                     (argc >= 1)) {
818                         r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error);
819                         argc--;
820                         continue;
821                 }
822
823                 if (!strcasecmp(arg_name, "pg_init_delay_msecs") &&
824                     (argc >= 1)) {
825                         r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error);
826                         argc--;
827                         continue;
828                 }
829
830                 ti->error = "Unrecognised multipath feature request";
831                 r = -EINVAL;
832         } while (argc && !r);
833
834         return r;
835 }
836
837 static int multipath_ctr(struct dm_target *ti, unsigned int argc,
838                          char **argv)
839 {
840         /* target arguments */
841         static struct dm_arg _args[] = {
842                 {0, 1024, "invalid number of priority groups"},
843                 {0, 1024, "invalid initial priority group number"},
844         };
845
846         int r;
847         struct multipath *m;
848         struct dm_arg_set as;
849         unsigned pg_count = 0;
850         unsigned next_pg_num;
851         bool use_blk_mq = dm_use_blk_mq(dm_table_get_md(ti->table));
852
853         as.argc = argc;
854         as.argv = argv;
855
856         m = alloc_multipath(ti, use_blk_mq);
857         if (!m) {
858                 ti->error = "can't allocate multipath";
859                 return -EINVAL;
860         }
861
862         r = parse_features(&as, m);
863         if (r)
864                 goto bad;
865
866         r = parse_hw_handler(&as, m);
867         if (r)
868                 goto bad;
869
870         r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error);
871         if (r)
872                 goto bad;
873
874         r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error);
875         if (r)
876                 goto bad;
877
878         if ((!m->nr_priority_groups && next_pg_num) ||
879             (m->nr_priority_groups && !next_pg_num)) {
880                 ti->error = "invalid initial priority group";
881                 r = -EINVAL;
882                 goto bad;
883         }
884
885         /* parse the priority groups */
886         while (as.argc) {
887                 struct priority_group *pg;
888
889                 pg = parse_priority_group(&as, m);
890                 if (IS_ERR(pg)) {
891                         r = PTR_ERR(pg);
892                         goto bad;
893                 }
894
895                 m->nr_valid_paths += pg->nr_pgpaths;
896                 list_add_tail(&pg->list, &m->priority_groups);
897                 pg_count++;
898                 pg->pg_num = pg_count;
899                 if (!--next_pg_num)
900                         m->next_pg = pg;
901         }
902
903         if (pg_count != m->nr_priority_groups) {
904                 ti->error = "priority group count mismatch";
905                 r = -EINVAL;
906                 goto bad;
907         }
908
909         ti->num_flush_bios = 1;
910         ti->num_discard_bios = 1;
911         ti->num_write_same_bios = 1;
912         if (use_blk_mq)
913                 ti->per_io_data_size = sizeof(struct dm_mpath_io);
914
915         return 0;
916
917  bad:
918         free_multipath(m);
919         return r;
920 }
921
922 static void multipath_wait_for_pg_init_completion(struct multipath *m)
923 {
924         DECLARE_WAITQUEUE(wait, current);
925         unsigned long flags;
926
927         add_wait_queue(&m->pg_init_wait, &wait);
928
929         while (1) {
930                 set_current_state(TASK_UNINTERRUPTIBLE);
931
932                 spin_lock_irqsave(&m->lock, flags);
933                 if (!m->pg_init_in_progress) {
934                         spin_unlock_irqrestore(&m->lock, flags);
935                         break;
936                 }
937                 spin_unlock_irqrestore(&m->lock, flags);
938
939                 io_schedule();
940         }
941         set_current_state(TASK_RUNNING);
942
943         remove_wait_queue(&m->pg_init_wait, &wait);
944 }
945
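/*
 * Quiesce all asynchronous multipath work: temporarily disable pg_init,
 * wait for any in-flight path activations to finish, then flush the
 * multipath workqueue and any pending trigger_event work.  Used on
 * suspend and before the target is destroyed.
 */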
946 static void flush_multipath_work(struct multipath *m)
947 {
948         unsigned long flags;
949
950         spin_lock_irqsave(&m->lock, flags);
951         m->pg_init_disabled = 1;
952         spin_unlock_irqrestore(&m->lock, flags);
953
954         flush_workqueue(kmpath_handlerd);
955         multipath_wait_for_pg_init_completion(m);
956         flush_workqueue(kmultipathd);
957         flush_work(&m->trigger_event);
958
959         spin_lock_irqsave(&m->lock, flags);
960         m->pg_init_disabled = 0;
961         spin_unlock_irqrestore(&m->lock, flags);
962 }
963
964 static void multipath_dtr(struct dm_target *ti)
965 {
966         struct multipath *m = ti->private;
967
968         flush_multipath_work(m);
969         free_multipath(m);
970 }
971
972 /*
973  * Take a path out of use.
974  */
975 static int fail_path(struct pgpath *pgpath)
976 {
977         unsigned long flags;
978         struct multipath *m = pgpath->pg->m;
979
980         spin_lock_irqsave(&m->lock, flags);
981
982         if (!pgpath->is_active)
983                 goto out;
984
985         DMWARN("Failing path %s.", pgpath->path.dev->name);
986
987         pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
988         pgpath->is_active = 0;
989         pgpath->fail_count++;
990
991         m->nr_valid_paths--;
992
993         if (pgpath == m->current_pgpath)
994                 m->current_pgpath = NULL;
995
996         dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
997                       pgpath->path.dev->name, m->nr_valid_paths);
998
999         schedule_work(&m->trigger_event);
1000
1001 out:
1002         spin_unlock_irqrestore(&m->lock, flags);
1003
1004         return 0;
1005 }
1006
1007 /*
1008  * Reinstate a previously-failed path
1009  */
1010 static int reinstate_path(struct pgpath *pgpath)
1011 {
1012         int r = 0, run_queue = 0;
1013         unsigned long flags;
1014         struct multipath *m = pgpath->pg->m;
1015
1016         spin_lock_irqsave(&m->lock, flags);
1017
1018         if (pgpath->is_active)
1019                 goto out;
1020
1021         if (!pgpath->pg->ps.type->reinstate_path) {
1022                 DMWARN("Reinstate path not supported by path selector %s",
1023                        pgpath->pg->ps.type->name);
1024                 r = -EINVAL;
1025                 goto out;
1026         }
1027
1028         r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
1029         if (r)
1030                 goto out;
1031
1032         pgpath->is_active = 1;
1033
1034         if (!m->nr_valid_paths++) {
1035                 m->current_pgpath = NULL;
1036                 run_queue = 1;
1037         } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
1038                 if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
1039                         m->pg_init_in_progress++;
1040         }
1041
1042         dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
1043                       pgpath->path.dev->name, m->nr_valid_paths);
1044
1045         schedule_work(&m->trigger_event);
1046
1047 out:
1048         spin_unlock_irqrestore(&m->lock, flags);
1049         if (run_queue)
1050                 dm_table_run_md_queue_async(m->ti->table);
1051
1052         return r;
1053 }
1054
1055 /*
1056  * Fail or reinstate all paths that match the provided struct dm_dev.
1057  */
1058 static int action_dev(struct multipath *m, struct dm_dev *dev,
1059                       action_fn action)
1060 {
1061         int r = -EINVAL;
1062         struct pgpath *pgpath;
1063         struct priority_group *pg;
1064
1065         list_for_each_entry(pg, &m->priority_groups, list) {
1066                 list_for_each_entry(pgpath, &pg->pgpaths, list) {
1067                         if (pgpath->path.dev == dev)
1068                                 r = action(pgpath);
1069                 }
1070         }
1071
1072         return r;
1073 }
1074
1075 /*
1076  * Temporarily try to avoid having to use the specified PG
1077  */
1078 static void bypass_pg(struct multipath *m, struct priority_group *pg,
1079                       int bypassed)
1080 {
1081         unsigned long flags;
1082
1083         spin_lock_irqsave(&m->lock, flags);
1084
1085         pg->bypassed = bypassed;
1086         m->current_pgpath = NULL;
1087         m->current_pg = NULL;
1088
1089         spin_unlock_irqrestore(&m->lock, flags);
1090
1091         schedule_work(&m->trigger_event);
1092 }
1093
1094 /*
1095  * Switch to using the specified PG from the next I/O that gets mapped
1096  */
1097 static int switch_pg_num(struct multipath *m, const char *pgstr)
1098 {
1099         struct priority_group *pg;
1100         unsigned pgnum;
1101         unsigned long flags;
1102         char dummy;
1103
1104         if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
1105             (pgnum > m->nr_priority_groups)) {
1106                 DMWARN("invalid PG number supplied to switch_pg_num");
1107                 return -EINVAL;
1108         }
1109
1110         spin_lock_irqsave(&m->lock, flags);
1111         list_for_each_entry(pg, &m->priority_groups, list) {
1112                 pg->bypassed = 0;
1113                 if (--pgnum)
1114                         continue;
1115
1116                 m->current_pgpath = NULL;
1117                 m->current_pg = NULL;
1118                 m->next_pg = pg;
1119         }
1120         spin_unlock_irqrestore(&m->lock, flags);
1121
1122         schedule_work(&m->trigger_event);
1123         return 0;
1124 }
1125
1126 /*
1127  * Set/clear bypassed status of a PG.
1128  * PGs are numbered upwards from 1 in the order they were declared.
1129  */
1130 static int bypass_pg_num(struct multipath *m, const char *pgstr, int bypassed)
1131 {
1132         struct priority_group *pg;
1133         unsigned pgnum;
1134         char dummy;
1135
1136         if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
1137             (pgnum > m->nr_priority_groups)) {
1138                 DMWARN("invalid PG number supplied to bypass_pg");
1139                 return -EINVAL;
1140         }
1141
1142         list_for_each_entry(pg, &m->priority_groups, list) {
1143                 if (!--pgnum)
1144                         break;
1145         }
1146
1147         bypass_pg(m, pg, bypassed);
1148         return 0;
1149 }
1150
1151 /*
1152  * Should we retry pg_init immediately?
1153  */
1154 static int pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
1155 {
1156         unsigned long flags;
1157         int limit_reached = 0;
1158
1159         spin_lock_irqsave(&m->lock, flags);
1160
1161         if (m->pg_init_count <= m->pg_init_retries && !m->pg_init_disabled)
1162                 m->pg_init_required = 1;
1163         else
1164                 limit_reached = 1;
1165
1166         spin_unlock_irqrestore(&m->lock, flags);
1167
1168         return limit_reached;
1169 }
1170
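/*
 * Completion handler for path activation (passed to scsi_dh_activate()).
 * Runs once per path activation and decides whether to retry pg_init,
 * bypass the PG, or fail the path; once the last outstanding activation
 * completes it clears queue_io and wakes any waiting suspend.
 */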
1171 static void pg_init_done(void *data, int errors)
1172 {
1173         struct pgpath *pgpath = data;
1174         struct priority_group *pg = pgpath->pg;
1175         struct multipath *m = pg->m;
1176         unsigned long flags;
1177         unsigned delay_retry = 0;
1178
1179         /* device or driver problems */
1180         switch (errors) {
1181         case SCSI_DH_OK:
1182                 break;
1183         case SCSI_DH_NOSYS:
1184                 if (!m->hw_handler_name) {
1185                         errors = 0;
1186                         break;
1187                 }
1188                 DMERR("Could not failover the device: Handler scsi_dh_%s "
1189                       "Error %d.", m->hw_handler_name, errors);
1190                 /*
1191                  * Fail path for now, so we do not ping pong
1192                  */
1193                 fail_path(pgpath);
1194                 break;
1195         case SCSI_DH_DEV_TEMP_BUSY:
1196                 /*
1197                  * Probably doing something like FW upgrade on the
1198                  * controller so try the other pg.
1199                  */
1200                 bypass_pg(m, pg, 1);
1201                 break;
1202         case SCSI_DH_RETRY:
1203                 /* Wait before retrying. */
1204                 delay_retry = 1; /* fall through */
1205         case SCSI_DH_IMM_RETRY:
1206         case SCSI_DH_RES_TEMP_UNAVAIL:
1207                 if (pg_init_limit_reached(m, pgpath))
1208                         fail_path(pgpath);
1209                 errors = 0;
1210                 break;
1211         default:
1212                 /*
1213                  * We probably do not want to fail the path for a device
1214                  * error, but this is what the old dm did. In future
1215                  * patches we can do more advanced handling.
1216                  */
1217                 fail_path(pgpath);
1218         }
1219
1220         spin_lock_irqsave(&m->lock, flags);
1221         if (errors) {
1222                 if (pgpath == m->current_pgpath) {
1223                         DMERR("Could not failover device. Error %d.", errors);
1224                         m->current_pgpath = NULL;
1225                         m->current_pg = NULL;
1226                 }
1227         } else if (!m->pg_init_required)
1228                 pg->bypassed = 0;
1229
1230         if (--m->pg_init_in_progress)
1231                 /* Activations of other paths are still ongoing */
1232                 goto out;
1233
1234         if (m->pg_init_required) {
1235                 m->pg_init_delay_retry = delay_retry;
1236                 if (__pg_init_all_paths(m))
1237                         goto out;
1238         }
1239         m->queue_io = 0;
1240
1241         /*
1242          * Wake up any thread waiting to suspend.
1243          */
1244         wake_up(&m->pg_init_wait);
1245
1246 out:
1247         spin_unlock_irqrestore(&m->lock, flags);
1248 }
1249
1250 static void activate_path(struct work_struct *work)
1251 {
1252         struct pgpath *pgpath =
1253                 container_of(work, struct pgpath, activate_path.work);
1254
1255         if (pgpath->is_active)
1256                 scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
1257                                  pg_init_done, pgpath);
1258         else
1259                 pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
1260 }
1261
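/*
 * Errors that indicate the target device rejected the I/O itself rather
 * than a transport/path problem, so retrying on another path is pointless.
 */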
1262 static int noretry_error(int error)
1263 {
1264         switch (error) {
1265         case -EOPNOTSUPP:
1266         case -EREMOTEIO:
1267         case -EILSEQ:
1268         case -ENODATA:
1269         case -ENOSPC:
1270                 return 1;
1271         }
1272
1273         /* Anything else could be a path failure, so should be retried */
1274         return 0;
1275 }
1276
1277 /*
1278  * end_io handling
1279  */
1280 static int do_end_io(struct multipath *m, struct request *clone,
1281                      int error, struct dm_mpath_io *mpio)
1282 {
1283         /*
1284          * We don't queue any clone request inside the multipath target
1285          * during end I/O handling, since those clone requests don't have
1286          * bio clones.  If we queue them inside the multipath target,
1287          * we need to make bio clones, that requires memory allocation.
1288          * (See drivers/md/dm.c:end_clone_bio() about why the clone requests
1289          *  don't have bio clones.)
1290          * Instead of queueing the clone request here, we queue the original
1291          * request into dm core, which will remake a clone request and
1292          * clone bios for it and resubmit it later.
1293          */
1294         int r = DM_ENDIO_REQUEUE;
1295         unsigned long flags;
1296
1297         if (!error && !clone->errors)
1298                 return 0;       /* I/O complete */
1299
1300         if (noretry_error(error))
1301                 return error;
1302
1303         if (mpio->pgpath)
1304                 fail_path(mpio->pgpath);
1305
1306         spin_lock_irqsave(&m->lock, flags);
1307         if (!m->nr_valid_paths) {
1308                 if (!m->queue_if_no_path) {
1309                         if (!__must_push_back(m))
1310                                 r = -EIO;
1311                 } else {
1312                         if (error == -EBADE)
1313                                 r = error;
1314                 }
1315         }
1316         spin_unlock_irqrestore(&m->lock, flags);
1317
1318         return r;
1319 }
1320
1321 static int multipath_end_io(struct dm_target *ti, struct request *clone,
1322                             int error, union map_info *map_context)
1323 {
1324         struct multipath *m = ti->private;
1325         struct dm_mpath_io *mpio = get_mpio(map_context);
1326         struct pgpath *pgpath;
1327         struct path_selector *ps;
1328         int r;
1329
1330         BUG_ON(!mpio);
1331
1332         r = do_end_io(m, clone, error, mpio);
1333         pgpath = mpio->pgpath;
1334         if (pgpath) {
1335                 ps = &pgpath->pg->ps;
1336                 if (ps->type->end_io)
1337                         ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
1338         }
1339         clear_request_fn_mpio(m, map_context);
1340
1341         return r;
1342 }
1343
1344 /*
1345  * Suspend can't complete until all the I/O is processed, so if
1346  * the last path fails we must error any remaining I/O.
1347  * Note that if the freeze_bdev fails while suspending, the
1348  * queue_if_no_path state is lost - userspace should reset it.
1349  */
1350 static void multipath_presuspend(struct dm_target *ti)
1351 {
1352         struct multipath *m = (struct multipath *) ti->private;
1353
1354         queue_if_no_path(m, 0, 1);
1355 }
1356
1357 static void multipath_postsuspend(struct dm_target *ti)
1358 {
1359         struct multipath *m = ti->private;
1360
1361         mutex_lock(&m->work_mutex);
1362         flush_multipath_work(m);
1363         mutex_unlock(&m->work_mutex);
1364 }
1365
1366 /*
1367  * Restore the queue_if_no_path setting.
1368  */
1369 static void multipath_resume(struct dm_target *ti)
1370 {
1371         struct multipath *m = (struct multipath *) ti->private;
1372         unsigned long flags;
1373
1374         spin_lock_irqsave(&m->lock, flags);
1375         m->queue_if_no_path = m->saved_queue_if_no_path;
1376         spin_unlock_irqrestore(&m->lock, flags);
1377 }
1378
1379 /*
1380  * Info output has the following format:
1381  * num_multipath_feature_args [multipath_feature_args]*
1382  * num_handler_status_args [handler_status_args]*
1383  * num_groups init_group_number
1384  *            [A|D|E num_ps_status_args [ps_status_args]*
1385  *             num_paths num_selector_args
1386  *             [path_dev A|F fail_count [selector_args]* ]+ ]+
1387  *
1388  * Table output has the following format (identical to the constructor string):
1389  * num_feature_args [features_args]*
1390  * num_handler_args hw_handler [hw_handler_args]*
1391  * num_groups init_group_number
1392  *     [priority selector-name num_ps_args [ps_args]*
1393  *      num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
1394  */
1395 static void multipath_status(struct dm_target *ti, status_type_t type,
1396                              unsigned status_flags, char *result, unsigned maxlen)
1397 {
1398         int sz = 0;
1399         unsigned long flags;
1400         struct multipath *m = (struct multipath *) ti->private;
1401         struct priority_group *pg;
1402         struct pgpath *p;
1403         unsigned pg_num;
1404         char state;
1405
1406         spin_lock_irqsave(&m->lock, flags);
1407
1408         /* Features */
1409         if (type == STATUSTYPE_INFO)
1410                 DMEMIT("2 %u %u ", m->queue_io, m->pg_init_count);
1411         else {
1412                 DMEMIT("%u ", m->queue_if_no_path +
1413                               (m->pg_init_retries > 0) * 2 +
1414                               (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 +
1415                               m->retain_attached_hw_handler);
1416                 if (m->queue_if_no_path)
1417                         DMEMIT("queue_if_no_path ");
1418                 if (m->pg_init_retries)
1419                         DMEMIT("pg_init_retries %u ", m->pg_init_retries);
1420                 if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
1421                         DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
1422                 if (m->retain_attached_hw_handler)
1423                         DMEMIT("retain_attached_hw_handler ");
1424         }
1425
1426         if (!m->hw_handler_name || type == STATUSTYPE_INFO)
1427                 DMEMIT("0 ");
1428         else
1429                 DMEMIT("1 %s ", m->hw_handler_name);
1430
1431         DMEMIT("%u ", m->nr_priority_groups);
1432
1433         if (m->next_pg)
1434                 pg_num = m->next_pg->pg_num;
1435         else if (m->current_pg)
1436                 pg_num = m->current_pg->pg_num;
1437         else
1438                 pg_num = (m->nr_priority_groups ? 1 : 0);
1439
1440         DMEMIT("%u ", pg_num);
1441
1442         switch (type) {
1443         case STATUSTYPE_INFO:
1444                 list_for_each_entry(pg, &m->priority_groups, list) {
1445                         if (pg->bypassed)
1446                                 state = 'D';    /* Disabled */
1447                         else if (pg == m->current_pg)
1448                                 state = 'A';    /* Currently Active */
1449                         else
1450                                 state = 'E';    /* Enabled */
1451
1452                         DMEMIT("%c ", state);
1453
1454                         if (pg->ps.type->status)
1455                                 sz += pg->ps.type->status(&pg->ps, NULL, type,
1456                                                           result + sz,
1457                                                           maxlen - sz);
1458                         else
1459                                 DMEMIT("0 ");
1460
1461                         DMEMIT("%u %u ", pg->nr_pgpaths,
1462                                pg->ps.type->info_args);
1463
1464                         list_for_each_entry(p, &pg->pgpaths, list) {
1465                                 DMEMIT("%s %s %u ", p->path.dev->name,
1466                                        p->is_active ? "A" : "F",
1467                                        p->fail_count);
1468                                 if (pg->ps.type->status)
1469                                         sz += pg->ps.type->status(&pg->ps,
1470                                               &p->path, type, result + sz,
1471                                               maxlen - sz);
1472                         }
1473                 }
1474                 break;
1475
1476         case STATUSTYPE_TABLE:
1477                 list_for_each_entry(pg, &m->priority_groups, list) {
1478                         DMEMIT("%s ", pg->ps.type->name);
1479
1480                         if (pg->ps.type->status)
1481                                 sz += pg->ps.type->status(&pg->ps, NULL, type,
1482                                                           result + sz,
1483                                                           maxlen - sz);
1484                         else
1485                                 DMEMIT("0 ");
1486
1487                         DMEMIT("%u %u ", pg->nr_pgpaths,
1488                                pg->ps.type->table_args);
1489
1490                         list_for_each_entry(p, &pg->pgpaths, list) {
1491                                 DMEMIT("%s ", p->path.dev->name);
1492                                 if (pg->ps.type->status)
1493                                         sz += pg->ps.type->status(&pg->ps,
1494                                               &p->path, type, result + sz,
1495                                               maxlen - sz);
1496                         }
1497                 }
1498                 break;
1499         }
1500
1501         spin_unlock_irqrestore(&m->lock, flags);
1502 }
1503
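/*
 * Handle messages sent to the target, e.g. via "dmsetup message".
 * Single-argument messages: queue_if_no_path, fail_if_no_path.
 * Two-argument messages: disable_group <PG#>, enable_group <PG#>,
 * switch_group <PG#>, reinstate_path <device>, fail_path <device>.
 *
 * Illustrative invocation (map name and device are made up):
 *   dmsetup message mpatha 0 fail_path 8:32
 */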
1504 static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
1505 {
1506         int r = -EINVAL;
1507         struct dm_dev *dev;
1508         struct multipath *m = (struct multipath *) ti->private;
1509         action_fn action;
1510
1511         mutex_lock(&m->work_mutex);
1512
1513         if (dm_suspended(ti)) {
1514                 r = -EBUSY;
1515                 goto out;
1516         }
1517
1518         if (argc == 1) {
1519                 if (!strcasecmp(argv[0], "queue_if_no_path")) {
1520                         r = queue_if_no_path(m, 1, 0);
1521                         goto out;
1522                 } else if (!strcasecmp(argv[0], "fail_if_no_path")) {
1523                         r = queue_if_no_path(m, 0, 0);
1524                         goto out;
1525                 }
1526         }
1527
1528         if (argc != 2) {
1529                 DMWARN("Invalid multipath message arguments. Expected 2 arguments, got %d.", argc);
1530                 goto out;
1531         }
1532
1533         if (!strcasecmp(argv[0], "disable_group")) {
1534                 r = bypass_pg_num(m, argv[1], 1);
1535                 goto out;
1536         } else if (!strcasecmp(argv[0], "enable_group")) {
1537                 r = bypass_pg_num(m, argv[1], 0);
1538                 goto out;
1539         } else if (!strcasecmp(argv[0], "switch_group")) {
1540                 r = switch_pg_num(m, argv[1]);
1541                 goto out;
1542         } else if (!strcasecmp(argv[0], "reinstate_path"))
1543                 action = reinstate_path;
1544         else if (!strcasecmp(argv[0], "fail_path"))
1545                 action = fail_path;
1546         else {
1547                 DMWARN("Unrecognised multipath message received: %s", argv[0]);
1548                 goto out;
1549         }
1550
1551         r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev);
1552         if (r) {
1553                 DMWARN("message: error getting device %s",
1554                        argv[1]);
1555                 goto out;
1556         }
1557
1558         r = action_dev(m, dev, action);
1559
1560         dm_put_device(ti, dev);
1561
1562 out:
1563         mutex_unlock(&m->work_mutex);
1564         return r;
1565 }
1566
1567 static int multipath_prepare_ioctl(struct dm_target *ti,
1568                 struct block_device **bdev, fmode_t *mode)
1569 {
1570         struct multipath *m = ti->private;
1571         unsigned long flags;
1572         int r;
1573
1574         spin_lock_irqsave(&m->lock, flags);
1575
1576         if (!m->current_pgpath)
1577                 __choose_pgpath(m, 0);
1578
1579         if (m->current_pgpath) {
1580                 if (!m->queue_io) {
1581                         *bdev = m->current_pgpath->path.dev->bdev;
1582                         *mode = m->current_pgpath->path.dev->mode;
1583                         r = 0;
1584                 } else {
1585                         /* pg_init has not started or completed */
1586                         r = -ENOTCONN;
1587                 }
1588         } else {
1589                 /* No path is available */
1590                 if (m->queue_if_no_path)
1591                         r = -ENOTCONN;
1592                 else
1593                         r = -EIO;
1594         }
1595
1596         spin_unlock_irqrestore(&m->lock, flags);
1597
1598         if (r == -ENOTCONN) {
1599                 spin_lock_irqsave(&m->lock, flags);
1600                 if (!m->current_pg) {
1601                         /* Path status changed, redo selection */
1602                         __choose_pgpath(m, 0);
1603                 }
1604                 if (m->pg_init_required)
1605                         __pg_init_all_paths(m);
1606                 spin_unlock_irqrestore(&m->lock, flags);
1607                 dm_table_run_md_queue_async(m->ti->table);
1608         }
1609
1610         /*
1611          * Only pass ioctls through if the device sizes match exactly.
1612          */
1613         if (!r && ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
1614                 return 1;
1615         return r;
1616 }
1617
1618 static int multipath_iterate_devices(struct dm_target *ti,
1619                                      iterate_devices_callout_fn fn, void *data)
1620 {
1621         struct multipath *m = ti->private;
1622         struct priority_group *pg;
1623         struct pgpath *p;
1624         int ret = 0;
1625
1626         list_for_each_entry(pg, &m->priority_groups, list) {
1627                 list_for_each_entry(p, &pg->pgpaths, list) {
1628                         ret = fn(ti, p->path.dev, ti->begin, ti->len, data);
1629                         if (ret)
1630                                 goto out;
1631                 }
1632         }
1633
1634 out:
1635         return ret;
1636 }
1637
1638 static int __pgpath_busy(struct pgpath *pgpath)
1639 {
1640         struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
1641
1642         return blk_lld_busy(q);
1643 }
1644
1645 /*
1646  * We return "busy" only when we can map I/Os but the underlying devices
1647  * are busy (so even if we mapped I/Os now, they would have to wait on
1648  * the underlying queue).
1649  * In other words, if we want to error I/Os or queue them inside the
1650  * target because no path is usable, we don't return "busy".  Otherwise,
1651  * dm core won't give us the I/Os and we can't do what we want.
1652  */
1653 static int multipath_busy(struct dm_target *ti)
1654 {
1655         int busy = 0, has_active = 0;
1656         struct multipath *m = ti->private;
1657         struct priority_group *pg;
1658         struct pgpath *pgpath;
1659         unsigned long flags;
1660
1661         spin_lock_irqsave(&m->lock, flags);
1662
1663         /* pg_init in progress or no paths available */
1664         if (m->pg_init_in_progress ||
1665             (!m->nr_valid_paths && m->queue_if_no_path)) {
1666                 busy = 1;
1667                 goto out;
1668         }
1669         /* Guess which priority_group will be used at next mapping time */
1670         if (unlikely(!m->current_pgpath && m->next_pg))
1671                 pg = m->next_pg;
1672         else if (likely(m->current_pg))
1673                 pg = m->current_pg;
1674         else
1675                 /*
1676                  * We don't know which pg will be used at next mapping time.
1677                  * We don't call __choose_pgpath() here to avoid triggering
1678                  * pg_init just by a busy check.
1679                  * So we don't know whether the underlying devices we will be
1680                  * using at next mapping time are busy or not. Just try mapping.
1681                  */
1682                 goto out;
1683
1684         /*
1685          * If there is one non-busy active path at least, the path selector
1686          * will be able to select it. So we consider such a pg as not busy.
1687          */
1688         busy = 1;
1689         list_for_each_entry(pgpath, &pg->pgpaths, list)
1690                 if (pgpath->is_active) {
1691                         has_active = 1;
1692
1693                         if (!__pgpath_busy(pgpath)) {
1694                                 busy = 0;
1695                                 break;
1696                         }
1697                 }
1698
1699         if (!has_active)
1700                 /*
1701                  * No active path in this pg, so this pg won't be used and
1702                  * the current_pg will be changed at next mapping time.
1703                  * We need to try mapping to determine it.
1704                  */
1705                 busy = 0;
1706
1707 out:
1708         spin_unlock_irqrestore(&m->lock, flags);
1709
1710         return busy;
1711 }
1712
1713 /*-----------------------------------------------------------------
1714  * Module setup
1715  *---------------------------------------------------------------*/
1716 static struct target_type multipath_target = {
1717         .name = "multipath",
1718         .version = {1, 11, 0},
1719         .features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE,
1720         .module = THIS_MODULE,
1721         .ctr = multipath_ctr,
1722         .dtr = multipath_dtr,
1723         .map_rq = multipath_map,
1724         .clone_and_map_rq = multipath_clone_and_map,
1725         .release_clone_rq = multipath_release_clone,
1726         .rq_end_io = multipath_end_io,
1727         .presuspend = multipath_presuspend,
1728         .postsuspend = multipath_postsuspend,
1729         .resume = multipath_resume,
1730         .status = multipath_status,
1731         .message = multipath_message,
1732         .prepare_ioctl = multipath_prepare_ioctl,
1733         .iterate_devices = multipath_iterate_devices,
1734         .busy = multipath_busy,
1735 };
1736
1737 static int __init dm_multipath_init(void)
1738 {
1739         int r;
1740
1741         /* allocate a slab for the dm_mpath_io structs */
1742         _mpio_cache = KMEM_CACHE(dm_mpath_io, 0);
1743         if (!_mpio_cache)
1744                 return -ENOMEM;
1745
1746         r = dm_register_target(&multipath_target);
1747         if (r < 0) {
1748                 DMERR("register failed %d", r);
1749                 r = -EINVAL;
1750                 goto bad_register_target;
1751         }
1752
1753         kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
1754         if (!kmultipathd) {
1755                 DMERR("failed to create workqueue kmpathd");
1756                 r = -ENOMEM;
1757                 goto bad_alloc_kmultipathd;
1758         }
1759
1760         /*
1761          * A separate workqueue is used to handle the device handlers,
1762          * to avoid overloading the existing workqueue. Overloading the
1763          * existing workqueue would also create a bottleneck in the
1764          * path of storage hardware device activation.
1765          */
1766         kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
1767                                                   WQ_MEM_RECLAIM);
1768         if (!kmpath_handlerd) {
1769                 DMERR("failed to create workqueue kmpath_handlerd");
1770                 r = -ENOMEM;
1771                 goto bad_alloc_kmpath_handlerd;
1772         }
1773
1774         DMINFO("version %u.%u.%u loaded",
1775                multipath_target.version[0], multipath_target.version[1],
1776                multipath_target.version[2]);
1777
1778         return 0;
1779
1780 bad_alloc_kmpath_handlerd:
1781         destroy_workqueue(kmultipathd);
1782 bad_alloc_kmultipathd:
1783         dm_unregister_target(&multipath_target);
1784 bad_register_target:
1785         kmem_cache_destroy(_mpio_cache);
1786
1787         return r;
1788 }
1789
1790 static void __exit dm_multipath_exit(void)
1791 {
1792         destroy_workqueue(kmpath_handlerd);
1793         destroy_workqueue(kmultipathd);
1794
1795         dm_unregister_target(&multipath_target);
1796         kmem_cache_destroy(_mpio_cache);
1797 }
1798
1799 module_init(dm_multipath_init);
1800 module_exit(dm_multipath_exit);
1801
1802 MODULE_DESCRIPTION(DM_NAME " multipath target");
1803 MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
1804 MODULE_LICENSE("GPL");