/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>

#include "dm-path-selector.h"
#include "dm-uevent.h"

#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/workqueue.h>
#include <scsi/scsi_dh.h>
#include <linux/atomic.h>

#define DM_MSG_PREFIX "multipath"
#define DM_PG_INIT_DELAY_MSECS 2000
#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)
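/*
 * DM_PG_INIT_DELAY_DEFAULT is a sentinel meaning "no user-specified delay";
 * __pg_init_all_paths() substitutes DM_PG_INIT_DELAY_MSECS while it is set.
 */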

/* Path properties */
struct pgpath {
	struct list_head list;

	struct priority_group *pg;	/* Owning PG */
	unsigned is_active;		/* Path status */
	unsigned fail_count;		/* Cumulative failure count */

	struct dm_path path;
	struct delayed_work activate_path;
};

#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)

/*
 * Paths are grouped into Priority Groups and numbered from 1 upwards.
 * Each has a path selector which controls which path gets used.
 */
struct priority_group {
	struct list_head list;

	struct multipath *m;		/* Owning multipath instance */
	struct path_selector ps;

	unsigned pg_num;		/* Reference number */
	unsigned bypassed;		/* Temporarily bypass this PG? */

	unsigned nr_pgpaths;		/* Number of paths in PG */
	struct list_head pgpaths;
};

/* Multipath context */
struct multipath {
	struct list_head list;
	struct dm_target *ti;

	const char *hw_handler_name;
	char *hw_handler_params;

	spinlock_t lock;

	unsigned nr_priority_groups;
	struct list_head priority_groups;

	wait_queue_head_t pg_init_wait;	/* Wait for pg_init completion */

	unsigned pg_init_required;	/* pg_init needs calling? */
	unsigned pg_init_in_progress;	/* Only one pg_init allowed at once */
	unsigned pg_init_delay_retry;	/* Delay pg_init retry? */

	unsigned nr_valid_paths;	/* Total number of usable paths */
	struct pgpath *current_pgpath;
	struct priority_group *current_pg;
	struct priority_group *next_pg;	/* Switch to this PG if set */
	unsigned repeat_count;		/* I/Os left before calling PS again */

	unsigned queue_io:1;		/* Must we queue all I/O? */
	unsigned queue_if_no_path:1;	/* Queue I/O if last path fails? */
	unsigned saved_queue_if_no_path:1; /* Saved state during suspension */

	unsigned pg_init_retries;	/* Number of times to retry pg_init */
	unsigned pg_init_count;		/* Number of times pg_init called */
	unsigned pg_init_delay_msecs;	/* Number of msecs before pg_init retry */

	unsigned queue_size;
	struct work_struct process_queued_ios;
	struct list_head queued_ios;

	struct work_struct trigger_event;

	/*
	 * We must use a mempool of dm_mpath_io structs so that we
	 * can resubmit bios on error.
	 */
	mempool_t *mpio_pool;

	struct mutex work_mutex;
};

/*
 * Context information attached to each bio we process.
 */
struct dm_mpath_io {
	struct pgpath *pgpath;
	size_t nr_bytes;
};

typedef int (*action_fn) (struct pgpath *pgpath);

#define MIN_IOS 256	/* Mempool size */

static struct kmem_cache *_mpio_cache;

static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
static void process_queued_ios(struct work_struct *work);
static void trigger_event(struct work_struct *work);
static void activate_path(struct work_struct *work);


/*-----------------------------------------------
 * Allocation routines
 *-----------------------------------------------*/

static struct pgpath *alloc_pgpath(void)
{
	struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);

	if (pgpath) {
		pgpath->is_active = 1;
		INIT_DELAYED_WORK(&pgpath->activate_path, activate_path);
	}

	return pgpath;
}

static void free_pgpath(struct pgpath *pgpath)
{
	kfree(pgpath);
}

static struct priority_group *alloc_priority_group(void)
{
	struct priority_group *pg;

	pg = kzalloc(sizeof(*pg), GFP_KERNEL);

	if (pg)
		INIT_LIST_HEAD(&pg->pgpaths);

	return pg;
}

static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
{
	struct pgpath *pgpath, *tmp;
	struct multipath *m = ti->private;

	list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
		list_del(&pgpath->list);
		if (m->hw_handler_name)
			scsi_dh_detach(bdev_get_queue(pgpath->path.dev->bdev));
		dm_put_device(ti, pgpath->path.dev);
		free_pgpath(pgpath);
	}
}

static void free_priority_group(struct priority_group *pg,
				struct dm_target *ti)
{
	struct path_selector *ps = &pg->ps;

	if (ps->type) {
		ps->type->destroy(ps);
		dm_put_path_selector(ps->type);
	}

	free_pgpaths(&pg->pgpaths, ti);
	kfree(pg);
}

static struct multipath *alloc_multipath(struct dm_target *ti)
{
	struct multipath *m;

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (m) {
		INIT_LIST_HEAD(&m->priority_groups);
		INIT_LIST_HEAD(&m->queued_ios);
		spin_lock_init(&m->lock);
		m->queue_io = 1;
		m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
		INIT_WORK(&m->process_queued_ios, process_queued_ios);
		INIT_WORK(&m->trigger_event, trigger_event);
		init_waitqueue_head(&m->pg_init_wait);
		mutex_init(&m->work_mutex);
		m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
		if (!m->mpio_pool) {
			kfree(m);
			return NULL;
		}
		m->ti = ti;
		ti->private = m;
	}

	return m;
}

static void free_multipath(struct multipath *m)
{
	struct priority_group *pg, *tmp;

	list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
		list_del(&pg->list);
		free_priority_group(pg, m->ti);
	}

	kfree(m->hw_handler_name);
	kfree(m->hw_handler_params);
	mempool_destroy(m->mpio_pool);
	kfree(m);
}

static int set_mapinfo(struct multipath *m, union map_info *info)
{
	struct dm_mpath_io *mpio;

	mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC);
	if (!mpio)
		return -ENOMEM;

	memset(mpio, 0, sizeof(*mpio));
	info->ptr = mpio;

	return 0;
}

static void clear_mapinfo(struct multipath *m, union map_info *info)
{
	struct dm_mpath_io *mpio = info->ptr;

	info->ptr = NULL;
	mempool_free(mpio, m->mpio_pool);
}

/*-----------------------------------------------
 * Path selection
 *-----------------------------------------------*/

static void __pg_init_all_paths(struct multipath *m)
{
	struct pgpath *pgpath;
	unsigned long pg_init_delay = 0;

	m->pg_init_count++;
	m->pg_init_required = 0;
	if (m->pg_init_delay_retry)
		pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
						 m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
	list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
		/* Skip failed paths */
		if (!pgpath->is_active)
			continue;
		if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
				       pg_init_delay))
			m->pg_init_in_progress++;
	}
}

static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
{
	m->current_pg = pgpath->pg;

	/* Must we initialise the PG first, and queue I/O till it's ready? */
	if (m->hw_handler_name) {
		m->pg_init_required = 1;
		m->queue_io = 1;
	} else {
		m->pg_init_required = 0;
		m->queue_io = 0;
	}

	m->pg_init_count = 0;
}

static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg,
			       size_t nr_bytes)
{
	struct dm_path *path;

	path = pg->ps.type->select_path(&pg->ps, &m->repeat_count, nr_bytes);
	if (!path)
		return -ENXIO;

	m->current_pgpath = path_to_pgpath(path);

	if (m->current_pg != pg)
		__switch_pg(m, m->current_pgpath);

	return 0;
}

static void __choose_pgpath(struct multipath *m, size_t nr_bytes)
{
	struct priority_group *pg;
	unsigned bypassed = 1;

	if (!m->nr_valid_paths)
		goto failed;

	/* Were we instructed to switch PG? */
	if (m->next_pg) {
		pg = m->next_pg;
		m->next_pg = NULL;
		if (!__choose_path_in_pg(m, pg, nr_bytes))
			return;
	}

	/* Don't change PG until it has no remaining paths */
	if (m->current_pg && !__choose_path_in_pg(m, m->current_pg, nr_bytes))
		return;

	/*
	 * Loop through priority groups until we find a valid path.
	 * First time we skip PGs marked 'bypassed'.
	 * Second time we only try the ones we skipped.
	 */
	do {
		list_for_each_entry(pg, &m->priority_groups, list) {
			if (pg->bypassed == bypassed)
				continue;
			if (!__choose_path_in_pg(m, pg, nr_bytes))
				return;
		}
	} while (bypassed--);

failed:
	m->current_pgpath = NULL;
	m->current_pg = NULL;
}

/*
 * Check whether bios must be queued in the device-mapper core rather
 * than here in the target.
 *
 * m->lock must be held on entry.
 *
 * If m->queue_if_no_path and m->saved_queue_if_no_path hold the
 * same value then we are not between multipath_presuspend()
 * and multipath_resume() calls and we have no need to check
 * for the DMF_NOFLUSH_SUSPENDING flag.
 */
static int __must_push_back(struct multipath *m)
{
	return (m->queue_if_no_path != m->saved_queue_if_no_path &&
		dm_noflush_suspending(m->ti));
}

static int map_io(struct multipath *m, struct request *clone,
		  union map_info *map_context, unsigned was_queued)
{
	int r = DM_MAPIO_REMAPPED;
	size_t nr_bytes = blk_rq_bytes(clone);
	unsigned long flags;
	struct pgpath *pgpath;
	struct block_device *bdev;
	struct dm_mpath_io *mpio = map_context->ptr;

	spin_lock_irqsave(&m->lock, flags);

	/* Do we need to select a new pgpath? */
	if (!m->current_pgpath ||
	    (!m->queue_io && (m->repeat_count && --m->repeat_count == 0)))
		__choose_pgpath(m, nr_bytes);

	pgpath = m->current_pgpath;

	if (was_queued)
		m->queue_size--;

	if ((pgpath && m->queue_io) ||
	    (!pgpath && m->queue_if_no_path)) {
		/* Queue for the daemon to resubmit */
		list_add_tail(&clone->queuelist, &m->queued_ios);
		m->queue_size++;
		if ((m->pg_init_required && !m->pg_init_in_progress) ||
		    !m->queue_io)
			queue_work(kmultipathd, &m->process_queued_ios);
		pgpath = NULL;
		r = DM_MAPIO_SUBMITTED;
	} else if (pgpath) {
		bdev = pgpath->path.dev->bdev;
		clone->q = bdev_get_queue(bdev);
		clone->rq_disk = bdev->bd_disk;
	} else if (__must_push_back(m))
		r = DM_MAPIO_REQUEUE;
	else
		r = -EIO;	/* Failed */

	mpio->pgpath = pgpath;
	mpio->nr_bytes = nr_bytes;

	if (r == DM_MAPIO_REMAPPED && pgpath->pg->ps.type->start_io)
		pgpath->pg->ps.type->start_io(&pgpath->pg->ps, &pgpath->path,
					      nr_bytes);

	spin_unlock_irqrestore(&m->lock, flags);

	return r;
}

/*
 * If we run out of usable paths, should we queue I/O or error it?
 */
static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path,
			    unsigned save_old_value)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	if (save_old_value)
		m->saved_queue_if_no_path = m->queue_if_no_path;
	else
		m->saved_queue_if_no_path = queue_if_no_path;
	m->queue_if_no_path = queue_if_no_path;
	if (!m->queue_if_no_path && m->queue_size)
		queue_work(kmultipathd, &m->process_queued_ios);

	spin_unlock_irqrestore(&m->lock, flags);

	return 0;
}

/*-----------------------------------------------------------------
 * The multipath daemon is responsible for resubmitting queued ios.
 *---------------------------------------------------------------*/

static void dispatch_queued_ios(struct multipath *m)
{
	int r;
	unsigned long flags;
	union map_info *info;
	struct request *clone, *n;
	LIST_HEAD(cl);

	spin_lock_irqsave(&m->lock, flags);
	list_splice_init(&m->queued_ios, &cl);
	spin_unlock_irqrestore(&m->lock, flags);

	list_for_each_entry_safe(clone, n, &cl, queuelist) {
		list_del_init(&clone->queuelist);

		info = dm_get_rq_mapinfo(clone);

		r = map_io(m, clone, info, 1);
		if (r < 0) {
			clear_mapinfo(m, info);
			dm_kill_unmapped_request(clone, r);
		} else if (r == DM_MAPIO_REMAPPED)
			dm_dispatch_request(clone);
		else if (r == DM_MAPIO_REQUEUE) {
			clear_mapinfo(m, info);
			dm_requeue_unmapped_request(clone);
		}
	}
}

static void process_queued_ios(struct work_struct *work)
{
	struct multipath *m =
		container_of(work, struct multipath, process_queued_ios);
	struct pgpath *pgpath = NULL;
	unsigned must_queue = 1;
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	if (!m->queue_size)
		goto out;

	if (!m->current_pgpath)
		__choose_pgpath(m, 0);

	pgpath = m->current_pgpath;

	if ((pgpath && !m->queue_io) ||
	    (!pgpath && !m->queue_if_no_path))
		must_queue = 0;

	if (m->pg_init_required && !m->pg_init_in_progress && pgpath)
		__pg_init_all_paths(m);

out:
	spin_unlock_irqrestore(&m->lock, flags);
	if (!must_queue)
		dispatch_queued_ios(m);
}

/*
 * An event is triggered whenever a path is taken out of use.
 * Includes path failure and PG bypass.
 */
static void trigger_event(struct work_struct *work)
{
	struct multipath *m =
		container_of(work, struct multipath, trigger_event);

	dm_table_event(m->ti->table);
}

/*-----------------------------------------------------------------
 * Constructor/argument parsing:
 * <#multipath feature args> [<arg>]*
 * <#hw_handler args> [hw_handler [<arg>]*]
 * <#priority groups>
 * <initial priority group>
 *     [<selector> <#selector args> [<arg>]*
 *      <#paths> <#per-path selector args>
 *         [<path> [<arg>]* ]+ ]+
 *---------------------------------------------------------------*/
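/*
 * Illustrative table line (an example of the format above, not taken from
 * the original source): one feature, no hardware handler, and a single
 * round-robin group of two paths with one per-path selector arg
 * (round-robin's repeat count):
 *
 *   0 1024 multipath 1 queue_if_no_path 0 1 1 round-robin 0 2 1 8:16 1000 8:32 1000
 *
 * "0 1024 multipath" is the dm-core start/length/target prefix; this
 * constructor sees the arguments from "1 queue_if_no_path" onwards.
 */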
static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
			       struct dm_target *ti)
{
	int r;
	struct path_selector_type *pst;
	unsigned ps_argc;

	static struct dm_arg _args[] = {
		{0, 1024, "invalid number of path selector args"},
	};

	pst = dm_get_path_selector(dm_shift_arg(as));
	if (!pst) {
		ti->error = "unknown path selector type";
		return -EINVAL;
	}

	r = dm_read_arg_group(_args, as, &ps_argc, &ti->error);
	if (r) {
		dm_put_path_selector(pst);
		return -EINVAL;
	}

	r = pst->create(&pg->ps, ps_argc, as->argv);
	if (r) {
		dm_put_path_selector(pst);
		ti->error = "path selector constructor failed";
		return r;
	}

	pg->ps.type = pst;
	dm_consume_args(as, ps_argc);

	return 0;
}

static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
				 struct dm_target *ti)
{
	int r;
	struct pgpath *p;
	struct multipath *m = ti->private;

	/* we need at least a path arg */
	if (as->argc < 1) {
		ti->error = "no device given";
		return ERR_PTR(-EINVAL);
	}

	p = alloc_pgpath();
	if (!p)
		return ERR_PTR(-ENOMEM);

	r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
			  &p->path.dev);
	if (r) {
		ti->error = "error getting device";
		goto bad;
	}

	if (m->hw_handler_name) {
		struct request_queue *q = bdev_get_queue(p->path.dev->bdev);

		r = scsi_dh_attach(q, m->hw_handler_name);
		if (r == -EBUSY) {
			/*
			 * Already attached to different hw_handler,
			 * try to reattach with correct one.
			 */
			scsi_dh_detach(q);
			r = scsi_dh_attach(q, m->hw_handler_name);
		}

		if (r < 0) {
			ti->error = "error attaching hardware handler";
			dm_put_device(ti, p->path.dev);
			goto bad;
		}

		if (m->hw_handler_params) {
			r = scsi_dh_set_params(q, m->hw_handler_params);
			if (r < 0) {
				ti->error = "unable to set hardware "
						"handler parameters";
				scsi_dh_detach(q);
				dm_put_device(ti, p->path.dev);
				goto bad;
			}
		}
	}

	r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
	if (r) {
		dm_put_device(ti, p->path.dev);
		goto bad;
	}

	return p;

 bad:
	free_pgpath(p);
	return ERR_PTR(r);
}

static struct priority_group *parse_priority_group(struct dm_arg_set *as,
						   struct multipath *m)
{
	static struct dm_arg _args[] = {
		{1, 1024, "invalid number of paths"},
		{0, 1024, "invalid number of selector args"}
	};

	int r;
	unsigned i, nr_selector_args, nr_args;
	struct priority_group *pg;
	struct dm_target *ti = m->ti;

	if (as->argc < 2) {
		as->argc = 0;
		ti->error = "not enough priority group arguments";
		return ERR_PTR(-EINVAL);
	}

	pg = alloc_priority_group();
	if (!pg) {
		ti->error = "couldn't allocate priority group";
		return ERR_PTR(-ENOMEM);
	}
	pg->m = m;

	r = parse_path_selector(as, pg, ti);
	if (r)
		goto bad;

	/*
	 * read the paths
	 */
	r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error);
	if (r)
		goto bad;

	r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error);
	if (r)
		goto bad;

	nr_args = 1 + nr_selector_args;
	for (i = 0; i < pg->nr_pgpaths; i++) {
		struct pgpath *pgpath;
		struct dm_arg_set path_args;

		if (as->argc < nr_args) {
			ti->error = "not enough path parameters";
			r = -EINVAL;
			goto bad;
		}

		path_args.argc = nr_args;
		path_args.argv = as->argv;

		pgpath = parse_path(&path_args, &pg->ps, ti);
		if (IS_ERR(pgpath)) {
			r = PTR_ERR(pgpath);
			goto bad;
		}

		pgpath->pg = pg;
		list_add_tail(&pgpath->list, &pg->pgpaths);
		dm_consume_args(as, nr_args);
	}

	return pg;

 bad:
	free_priority_group(pg, ti);
	return ERR_PTR(r);
}

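/*
 * Illustrative hardware handler strings (examples, not from the original
 * source): "1 alua" requests the scsi_dh_alua module (attachment happens
 * per-path in parse_path()); "2 <handler> <arg>" additionally passes one
 * parameter via scsi_dh_set_params().
 */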
static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
{
	unsigned hw_argc;
	int ret;
	struct dm_target *ti = m->ti;

	static struct dm_arg _args[] = {
		{0, 1024, "invalid number of hardware handler args"},
	};

	if (dm_read_arg_group(_args, as, &hw_argc, &ti->error))
		return -EINVAL;

	if (!hw_argc)
		return 0;

	m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
	if (!try_then_request_module(scsi_dh_handler_exist(m->hw_handler_name),
				     "scsi_dh_%s", m->hw_handler_name)) {
		ti->error = "unknown hardware handler type";
		ret = -EINVAL;
		goto fail;
	}

	if (hw_argc > 1) {
		char *p;
		int i, j, len = 4;

		for (i = 0; i <= hw_argc - 2; i++)
			len += strlen(as->argv[i]) + 1;
		p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
		if (!p) {
			ti->error = "memory allocation failed";
			ret = -ENOMEM;
			goto fail;
		}
		j = sprintf(p, "%d", hw_argc - 1);
		for (i = 0, p+=j+1; i <= hw_argc - 2; i++, p+=j+1)
			j = sprintf(p, "%s", as->argv[i]);
	}
	dm_consume_args(as, hw_argc - 1);

	return 0;
fail:
	kfree(m->hw_handler_name);
	m->hw_handler_name = NULL;
	return ret;
}

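/*
 * Illustrative feature string (an example, not from the original source):
 * "3 queue_if_no_path pg_init_retries 5" queues I/O when no path is usable
 * and allows up to 5 pg_init retries.
 */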
static int parse_features(struct dm_arg_set *as, struct multipath *m)
{
	int r;
	unsigned argc;
	struct dm_target *ti = m->ti;
	const char *arg_name;

	static struct dm_arg _args[] = {
		{0, 5, "invalid number of feature args"},
		{1, 50, "pg_init_retries must be between 1 and 50"},
		{0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
	};

	r = dm_read_arg_group(_args, as, &argc, &ti->error);
	if (r)
		return -EINVAL;

	if (!argc)
		return 0;

	do {
		arg_name = dm_shift_arg(as);
		argc--;

		if (!strcasecmp(arg_name, "queue_if_no_path")) {
			r = queue_if_no_path(m, 1, 0);
			continue;
		}

		if (!strcasecmp(arg_name, "pg_init_retries") &&
		    (argc >= 1)) {
			r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error);
			argc--;
			continue;
		}

		if (!strcasecmp(arg_name, "pg_init_delay_msecs") &&
		    (argc >= 1)) {
			r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error);
			argc--;
			continue;
		}

		ti->error = "Unrecognised multipath feature request";
		r = -EINVAL;
	} while (argc && !r);

	return r;
}

static int multipath_ctr(struct dm_target *ti, unsigned int argc,
			 char **argv)
{
	/* target arguments */
	static struct dm_arg _args[] = {
		{0, 1024, "invalid number of priority groups"},
		{0, 1024, "invalid initial priority group number"},
	};

	int r;
	struct multipath *m;
	struct dm_arg_set as;
	unsigned pg_count = 0;
	unsigned next_pg_num;

	as.argc = argc;
	as.argv = argv;

	m = alloc_multipath(ti);
	if (!m) {
		ti->error = "can't allocate multipath";
		return -EINVAL;
	}

	r = parse_features(&as, m);
	if (r)
		goto bad;

	r = parse_hw_handler(&as, m);
	if (r)
		goto bad;

	r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error);
	if (r)
		goto bad;

	r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error);
	if (r)
		goto bad;

	if ((!m->nr_priority_groups && next_pg_num) ||
	    (m->nr_priority_groups && !next_pg_num)) {
		ti->error = "invalid initial priority group";
		r = -EINVAL;
		goto bad;
	}

	/* parse the priority groups */
	while (as.argc) {
		struct priority_group *pg;

		pg = parse_priority_group(&as, m);
		if (IS_ERR(pg)) {
			r = PTR_ERR(pg);
			goto bad;
		}

		m->nr_valid_paths += pg->nr_pgpaths;
		list_add_tail(&pg->list, &m->priority_groups);
		pg_count++;
		pg->pg_num = pg_count;
		if (!--next_pg_num)
			m->next_pg = pg;
	}

	if (pg_count != m->nr_priority_groups) {
		ti->error = "priority group count mismatch";
		r = -EINVAL;
		goto bad;
	}

	ti->num_flush_requests = 1;
	ti->num_discard_requests = 1;

	return 0;

 bad:
	free_multipath(m);
	return r;
}

static void multipath_wait_for_pg_init_completion(struct multipath *m)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;

	add_wait_queue(&m->pg_init_wait, &wait);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		spin_lock_irqsave(&m->lock, flags);
		if (!m->pg_init_in_progress) {
			spin_unlock_irqrestore(&m->lock, flags);
			break;
		}
		spin_unlock_irqrestore(&m->lock, flags);

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	remove_wait_queue(&m->pg_init_wait, &wait);
}

static void flush_multipath_work(struct multipath *m)
{
	flush_workqueue(kmpath_handlerd);
	multipath_wait_for_pg_init_completion(m);
	flush_workqueue(kmultipathd);
	flush_work_sync(&m->trigger_event);
}

static void multipath_dtr(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	flush_multipath_work(m);
	free_multipath(m);
}

/*
 * Map cloned requests
 */
static int multipath_map(struct dm_target *ti, struct request *clone,
			 union map_info *map_context)
{
	int r;
	struct multipath *m = (struct multipath *) ti->private;

	if (set_mapinfo(m, map_context) < 0)
		/* ENOMEM, requeue */
		return DM_MAPIO_REQUEUE;

	clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
	r = map_io(m, clone, map_context, 0);
	if (r < 0 || r == DM_MAPIO_REQUEUE)
		clear_mapinfo(m, map_context);

	return r;
}

/*
 * Take a path out of use.
 */
static int fail_path(struct pgpath *pgpath)
{
	unsigned long flags;
	struct multipath *m = pgpath->pg->m;

	spin_lock_irqsave(&m->lock, flags);

	if (!pgpath->is_active)
		goto out;

	DMWARN("Failing path %s.", pgpath->path.dev->name);

	pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
	pgpath->is_active = 0;
	pgpath->fail_count++;

	m->nr_valid_paths--;

	if (pgpath == m->current_pgpath)
		m->current_pgpath = NULL;

	dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
		       pgpath->path.dev->name, m->nr_valid_paths);

	schedule_work(&m->trigger_event);

out:
	spin_unlock_irqrestore(&m->lock, flags);

	return 0;
}

/*
 * Reinstate a previously-failed path
 */
static int reinstate_path(struct pgpath *pgpath)
{
	int r = 0;
	unsigned long flags;
	struct multipath *m = pgpath->pg->m;

	spin_lock_irqsave(&m->lock, flags);

	if (pgpath->is_active)
		goto out;

	if (!pgpath->pg->ps.type->reinstate_path) {
		DMWARN("Reinstate path not supported by path selector %s",
		       pgpath->pg->ps.type->name);
		r = -EINVAL;
		goto out;
	}

	r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
	if (r)
		goto out;

	pgpath->is_active = 1;

	if (!m->nr_valid_paths++ && m->queue_size) {
		m->current_pgpath = NULL;
		queue_work(kmultipathd, &m->process_queued_ios);
	} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
		if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
			m->pg_init_in_progress++;
	}

	dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
		       pgpath->path.dev->name, m->nr_valid_paths);

	schedule_work(&m->trigger_event);

out:
	spin_unlock_irqrestore(&m->lock, flags);

	return r;
}

/*
 * Fail or reinstate all paths that match the provided struct dm_dev.
 */
static int action_dev(struct multipath *m, struct dm_dev *dev,
		      action_fn action)
{
	int r = -EINVAL;
	struct pgpath *pgpath;
	struct priority_group *pg;

	list_for_each_entry(pg, &m->priority_groups, list) {
		list_for_each_entry(pgpath, &pg->pgpaths, list) {
			if (pgpath->path.dev == dev)
				r = action(pgpath);
		}
	}

	return r;
}

/*
 * Temporarily try to avoid having to use the specified PG
 */
static void bypass_pg(struct multipath *m, struct priority_group *pg,
		      int bypassed)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	pg->bypassed = bypassed;
	m->current_pgpath = NULL;
	m->current_pg = NULL;

	spin_unlock_irqrestore(&m->lock, flags);

	schedule_work(&m->trigger_event);
}

/*
 * Switch to using the specified PG from the next I/O that gets mapped
 */
static int switch_pg_num(struct multipath *m, const char *pgstr)
{
	struct priority_group *pg;
	unsigned pgnum;
	unsigned long flags;
	char dummy;

	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
	    (pgnum > m->nr_priority_groups)) {
		DMWARN("invalid PG number supplied to switch_pg_num");
		return -EINVAL;
	}

	spin_lock_irqsave(&m->lock, flags);
	list_for_each_entry(pg, &m->priority_groups, list) {
		pg->bypassed = 0;
		if (--pgnum)
			continue;

		m->current_pgpath = NULL;
		m->current_pg = NULL;
		m->next_pg = pg;
	}
	spin_unlock_irqrestore(&m->lock, flags);

	schedule_work(&m->trigger_event);
	return 0;
}

/*
 * Set/clear bypassed status of a PG.
 * PGs are numbered upwards from 1 in the order they were declared.
 */
static int bypass_pg_num(struct multipath *m, const char *pgstr, int bypassed)
{
	struct priority_group *pg;
	unsigned pgnum;
	char dummy;

	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
	    (pgnum > m->nr_priority_groups)) {
		DMWARN("invalid PG number supplied to bypass_pg");
		return -EINVAL;
	}

	list_for_each_entry(pg, &m->priority_groups, list) {
		if (!--pgnum)
			break;
	}

	bypass_pg(m, pg, bypassed);
	return 0;
}

/*
 * Should we retry pg_init immediately?
 */
static int pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
{
	unsigned long flags;
	int limit_reached = 0;

	spin_lock_irqsave(&m->lock, flags);

	if (m->pg_init_count <= m->pg_init_retries)
		m->pg_init_required = 1;
	else
		limit_reached = 1;

	spin_unlock_irqrestore(&m->lock, flags);

	return limit_reached;
}

static void pg_init_done(void *data, int errors)
{
	struct pgpath *pgpath = data;
	struct priority_group *pg = pgpath->pg;
	struct multipath *m = pg->m;
	unsigned long flags;
	unsigned delay_retry = 0;

	/* device or driver problems */
	switch (errors) {
	case SCSI_DH_OK:
		break;
	case SCSI_DH_NOSYS:
		if (!m->hw_handler_name) {
			errors = 0;
			break;
		}
		DMERR("Could not failover the device: Handler scsi_dh_%s "
		      "Error %d.", m->hw_handler_name, errors);
		/*
		 * Fail path for now, so we do not ping pong
		 */
		fail_path(pgpath);
		break;
	case SCSI_DH_DEV_TEMP_BUSY:
		/*
		 * Probably doing something like FW upgrade on the
		 * controller so try the other pg.
		 */
		bypass_pg(m, pg, 1);
		break;
	case SCSI_DH_RETRY:
		/* Wait before retrying. */
		delay_retry = 1;
	case SCSI_DH_IMM_RETRY:
	case SCSI_DH_RES_TEMP_UNAVAIL:
		if (pg_init_limit_reached(m, pgpath))
			fail_path(pgpath);
		errors = 0;
		break;
	default:
		/*
		 * We probably do not want to fail the path for a device
		 * error, but this is what the old dm did. In future
		 * patches we can do more advanced handling.
		 */
		fail_path(pgpath);
	}

	spin_lock_irqsave(&m->lock, flags);
	if (errors) {
		if (pgpath == m->current_pgpath) {
			DMERR("Could not failover device. Error %d.", errors);
			m->current_pgpath = NULL;
			m->current_pg = NULL;
		}
	} else if (!m->pg_init_required)
		pg->bypassed = 0;

	if (--m->pg_init_in_progress)
		/* Activations of other paths are still on going */
		goto out;

	if (!m->pg_init_required)
		m->queue_io = 0;

	m->pg_init_delay_retry = delay_retry;
	queue_work(kmultipathd, &m->process_queued_ios);

	/*
	 * Wake up any thread waiting to suspend.
	 */
	wake_up(&m->pg_init_wait);

out:
	spin_unlock_irqrestore(&m->lock, flags);
}

static void activate_path(struct work_struct *work)
{
	struct pgpath *pgpath =
		container_of(work, struct pgpath, activate_path.work);

	scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
			 pg_init_done, pgpath);
}

/*
 * end_io handling
 */
static int do_end_io(struct multipath *m, struct request *clone,
		     int error, struct dm_mpath_io *mpio)
{
	/*
	 * We don't queue any clone request inside the multipath target
	 * during end I/O handling, since those clone requests don't have
	 * bio clones.  If we queue them inside the multipath target,
	 * we need to make bio clones, that requires memory allocation.
	 * (See drivers/md/dm.c:end_clone_bio() about why the clone requests
	 *  don't have bio clones.)
	 * Instead of queueing the clone request here, we queue the original
	 * request into dm core, which will remake a clone request and
	 * clone bios for it and resubmit it later.
	 */
	int r = DM_ENDIO_REQUEUE;
	unsigned long flags;

	if (!error && !clone->errors)
		return 0;	/* I/O complete */

	if (error == -EOPNOTSUPP || error == -EREMOTEIO || error == -EILSEQ)
		return error;

	if (mpio->pgpath)
		fail_path(mpio->pgpath);

	spin_lock_irqsave(&m->lock, flags);
	if (!m->nr_valid_paths) {
		if (!m->queue_if_no_path) {
			if (!__must_push_back(m))
				r = -EIO;
		} else {
			if (error == -EBADE)
				r = error;
		}
	}
	spin_unlock_irqrestore(&m->lock, flags);

	return r;
}

static int multipath_end_io(struct dm_target *ti, struct request *clone,
			    int error, union map_info *map_context)
{
	struct multipath *m = ti->private;
	struct dm_mpath_io *mpio = map_context->ptr;
	struct pgpath *pgpath = mpio->pgpath;
	struct path_selector *ps;
	int r;

	BUG_ON(!mpio);

	r = do_end_io(m, clone, error, mpio);
	if (pgpath) {
		ps = &pgpath->pg->ps;
		if (ps->type->end_io)
			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
	}
	clear_mapinfo(m, map_context);

	return r;
}

/*
 * Suspend can't complete until all the I/O is processed so if
 * the last path fails we must error any remaining I/O.
 * Note that if the freeze_bdev fails while suspending, the
 * queue_if_no_path state is lost - userspace should reset it.
 */
static void multipath_presuspend(struct dm_target *ti)
{
	struct multipath *m = (struct multipath *) ti->private;

	queue_if_no_path(m, 0, 1);
}

static void multipath_postsuspend(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	mutex_lock(&m->work_mutex);
	flush_multipath_work(m);
	mutex_unlock(&m->work_mutex);
}

/*
 * Restore the queue_if_no_path setting.
 */
static void multipath_resume(struct dm_target *ti)
{
	struct multipath *m = (struct multipath *) ti->private;
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);
	m->queue_if_no_path = m->saved_queue_if_no_path;
	spin_unlock_irqrestore(&m->lock, flags);
}

/*
 * Info output has the following format:
 * num_multipath_feature_args [multipath_feature_args]*
 * num_handler_status_args [handler_status_args]*
 * num_groups init_group_number
 *            [A|D|E num_ps_status_args [ps_status_args]*
 *             num_paths num_selector_args
 *             [path_dev A|F fail_count [selector_args]* ]+ ]+
 *
 * Table output has the following format (identical to the constructor string):
 * num_feature_args [features_args]*
 * num_handler_args hw_handler [hw_handler_args]*
 * num_groups init_group_number
 *     [priority selector-name num_ps_args [ps_args]*
 *      num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
 */
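/*
 * Illustrative only (an example, not from the original source): for the
 * sample table shown above the constructor, STATUSTYPE_TABLE would emit
 *
 *   1 queue_if_no_path 0 1 1 round-robin 0 2 1 8:16 1000 8:32 1000
 *
 * while STATUSTYPE_INFO additionally reports queue_size/pg_init_count,
 * the A|D|E group state and the per-path A|F state with fail counts.
 */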
static int multipath_status(struct dm_target *ti, status_type_t type,
			    char *result, unsigned int maxlen)
{
	int sz = 0;
	unsigned long flags;
	struct multipath *m = ti->private;
	struct priority_group *pg;
	struct pgpath *p;
	unsigned pg_num;
	char state;

	spin_lock_irqsave(&m->lock, flags);

	/* Features */
	if (type == STATUSTYPE_INFO)
		DMEMIT("2 %u %u ", m->queue_size, m->pg_init_count);
	else {
		/*
		 * Each value-carrying feature emits two arguments (its
		 * name and its value); queue_if_no_path emits just one.
		 */
		DMEMIT("%u ", m->queue_if_no_path +
			      (m->pg_init_retries > 0) * 2 +
			      (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2);
		if (m->queue_if_no_path)
			DMEMIT("queue_if_no_path ");
		if (m->pg_init_retries)
			DMEMIT("pg_init_retries %u ", m->pg_init_retries);
		if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
			DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
	}

	if (!m->hw_handler_name || type == STATUSTYPE_INFO)
		DMEMIT("0 ");
	else
		DMEMIT("1 %s ", m->hw_handler_name);

	DMEMIT("%u ", m->nr_priority_groups);

	if (m->next_pg)
		pg_num = m->next_pg->pg_num;
	else if (m->current_pg)
		pg_num = m->current_pg->pg_num;
	else
		pg_num = (m->nr_priority_groups ? 1 : 0);

	DMEMIT("%u ", pg_num);

	switch (type) {
	case STATUSTYPE_INFO:
		list_for_each_entry(pg, &m->priority_groups, list) {
			if (pg->bypassed)
				state = 'D';	/* Disabled */
			else if (pg == m->current_pg)
				state = 'A';	/* Currently Active */
			else
				state = 'E';	/* Enabled */

			DMEMIT("%c ", state);

			if (pg->ps.type->status)
				sz += pg->ps.type->status(&pg->ps, NULL, type,
							  result + sz,
							  maxlen - sz);
			else
				DMEMIT("0 ");

			DMEMIT("%u %u ", pg->nr_pgpaths,
			       pg->ps.type->info_args);

			list_for_each_entry(p, &pg->pgpaths, list) {
				DMEMIT("%s %s %u ", p->path.dev->name,
				       p->is_active ? "A" : "F",
				       p->fail_count);
				if (pg->ps.type->status)
					sz += pg->ps.type->status(&pg->ps,
					      &p->path, type, result + sz,
					      maxlen - sz);
			}
		}
		break;

	case STATUSTYPE_TABLE:
		list_for_each_entry(pg, &m->priority_groups, list) {
			DMEMIT("%s ", pg->ps.type->name);

			if (pg->ps.type->status)
				sz += pg->ps.type->status(&pg->ps, NULL, type,
							  result + sz,
							  maxlen - sz);
			else
				DMEMIT("0 ");

			DMEMIT("%u %u ", pg->nr_pgpaths,
			       pg->ps.type->table_args);

			list_for_each_entry(p, &pg->pgpaths, list) {
				DMEMIT("%s ", p->path.dev->name);
				if (pg->ps.type->status)
					sz += pg->ps.type->status(&pg->ps,
					      &p->path, type, result + sz,
					      maxlen - sz);
			}
		}
		break;
	}

	spin_unlock_irqrestore(&m->lock, flags);

	return 0;
}

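/*
 * Hedged usage sketch: these messages are normally delivered with
 * dmsetup's message command.  The device name and path device numbers
 * below are hypothetical:
 *
 *	dmsetup message mpath0 0 fail_path 8:32
 *	dmsetup message mpath0 0 reinstate_path 8:32
 *	dmsetup message mpath0 0 switch_group 2
 */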
static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
{
	int r = -EINVAL;
	struct dm_dev *dev;
	struct multipath *m = ti->private;
	action_fn action;

	mutex_lock(&m->work_mutex);

	if (dm_suspended(ti)) {
		r = -EBUSY;
		goto out;
	}

	if (argc == 1) {
		if (!strcasecmp(argv[0], "queue_if_no_path")) {
			r = queue_if_no_path(m, 1, 0);
			goto out;
		} else if (!strcasecmp(argv[0], "fail_if_no_path")) {
			r = queue_if_no_path(m, 0, 0);
			goto out;
		}
	}

	if (argc != 2) {
		DMWARN("Unrecognised multipath message received.");
		goto out;
	}

	if (!strcasecmp(argv[0], "disable_group")) {
		r = bypass_pg_num(m, argv[1], 1);
		goto out;
	} else if (!strcasecmp(argv[0], "enable_group")) {
		r = bypass_pg_num(m, argv[1], 0);
		goto out;
	} else if (!strcasecmp(argv[0], "switch_group")) {
		r = switch_pg_num(m, argv[1]);
		goto out;
	} else if (!strcasecmp(argv[0], "reinstate_path"))
		action = reinstate_path;
	else if (!strcasecmp(argv[0], "fail_path"))
		action = fail_path;
	else {
		DMWARN("Unrecognised multipath message received.");
		goto out;
	}

	r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev);
	if (r) {
		DMWARN("message: error getting device %s",
		       argv[1]);
		goto out;
	}

	r = action_dev(m, dev, action);

	dm_put_device(ti, dev);

out:
	mutex_unlock(&m->work_mutex);
	return r;
}

static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
			   unsigned long arg)
{
	struct multipath *m = ti->private;
	struct block_device *bdev = NULL;
	fmode_t mode = 0;
	unsigned long flags;
	int r = 0;

	spin_lock_irqsave(&m->lock, flags);

	if (!m->current_pgpath)
		__choose_pgpath(m, 0);

	if (m->current_pgpath) {
		bdev = m->current_pgpath->path.dev->bdev;
		mode = m->current_pgpath->path.dev->mode;
	}

	if (m->queue_io)
		r = -EAGAIN;
	else if (!bdev)
		r = -EIO;

	spin_unlock_irqrestore(&m->lock, flags);

	/*
	 * Only pass ioctls through unrestricted if the device sizes match
	 * exactly; otherwise limit them to the set that is safe on a
	 * partially mapped device.
	 */
	if (!r && ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT)
		r = scsi_verify_blk_ioctl(NULL, cmd);

	return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
}

static int multipath_iterate_devices(struct dm_target *ti,
				     iterate_devices_callout_fn fn, void *data)
{
	struct multipath *m = ti->private;
	struct priority_group *pg;
	struct pgpath *p;
	int ret = 0;

	list_for_each_entry(pg, &m->priority_groups, list) {
		list_for_each_entry(p, &pg->pgpaths, list) {
			ret = fn(ti, p->path.dev, ti->begin, ti->len, data);
			if (ret)
				goto out;
		}
	}

out:
	return ret;
}

static int __pgpath_busy(struct pgpath *pgpath)
{
	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);

	return dm_underlying_device_busy(q);
}

/*
 * We return "busy" only when we can map I/Os but the underlying
 * devices are busy (so even if we mapped I/Os now, they would wait
 * on the underlying queue).
 * In other words, if we want to kill I/Os or queue them inside us
 * due to map unavailability, we don't return "busy".  Otherwise,
 * dm core won't give us the I/Os and we can't do what we want.
 */
static int multipath_busy(struct dm_target *ti)
{
	int busy = 0, has_active = 0;
	struct multipath *m = ti->private;
	struct priority_group *pg;
	struct pgpath *pgpath;
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	/* Guess which priority_group will be used at next mapping time */
	if (unlikely(!m->current_pgpath && m->next_pg))
		pg = m->next_pg;
	else if (likely(m->current_pg))
		pg = m->current_pg;
	else
		/*
		 * We don't know which pg will be used at next mapping time.
		 * We don't call __choose_pgpath() here to avoid triggering
		 * pg_init just by a busy check.
		 * So we don't know whether the underlying devices we will
		 * be using at next mapping time are busy or not.  Just try
		 * mapping.
		 */
		goto out;

	/*
	 * If there is at least one non-busy active path, the path selector
	 * will be able to select it, so we consider such a pg as not busy.
	 */
	busy = 1;
	list_for_each_entry(pgpath, &pg->pgpaths, list)
		if (pgpath->is_active) {
			has_active = 1;

			if (!__pgpath_busy(pgpath)) {
				busy = 0;
				break;
			}
		}

	if (!has_active)
		/*
		 * No active path in this pg, so this pg won't be used and
		 * the current_pg will be changed at next mapping time.
		 * We need to try mapping to determine it.
		 */
		busy = 0;

out:
	spin_unlock_irqrestore(&m->lock, flags);

	return busy;
}

/*-----------------------------------------------------------------
 * Module setup
 *---------------------------------------------------------------*/
static struct target_type multipath_target = {
	.name = "multipath",
	.version = {1, 3, 0},
	.module = THIS_MODULE,
	.ctr = multipath_ctr,
	.dtr = multipath_dtr,
	.map_rq = multipath_map,
	.rq_end_io = multipath_end_io,
	.presuspend = multipath_presuspend,
	.postsuspend = multipath_postsuspend,
	.resume = multipath_resume,
	.status = multipath_status,
	.message = multipath_message,
	.ioctl = multipath_ioctl,
	.iterate_devices = multipath_iterate_devices,
	.busy = multipath_busy,
};

static int __init dm_multipath_init(void)
{
	int r;

	/* Allocate a slab cache for the per-request dm_mpath_io structs */
	_mpio_cache = KMEM_CACHE(dm_mpath_io, 0);
	if (!_mpio_cache)
		return -ENOMEM;

	r = dm_register_target(&multipath_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		kmem_cache_destroy(_mpio_cache);
		return -EINVAL;
	}

	kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
	if (!kmultipathd) {
		DMERR("failed to create workqueue kmpathd");
		dm_unregister_target(&multipath_target);
		kmem_cache_destroy(_mpio_cache);
		return -ENOMEM;
	}

	/*
	 * A separate workqueue is used to handle the device handlers
	 * so as not to overload the existing workqueue, which would
	 * otherwise become a bottleneck in the activation path of the
	 * storage hardware.
	 */
	kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
						  WQ_MEM_RECLAIM);
	if (!kmpath_handlerd) {
		DMERR("failed to create workqueue kmpath_handlerd");
		destroy_workqueue(kmultipathd);
		dm_unregister_target(&multipath_target);
		kmem_cache_destroy(_mpio_cache);
		return -ENOMEM;
	}

	DMINFO("version %u.%u.%u loaded",
	       multipath_target.version[0], multipath_target.version[1],
	       multipath_target.version[2]);

	return r;
}

static void __exit dm_multipath_exit(void)
{
	destroy_workqueue(kmpath_handlerd);
	destroy_workqueue(kmultipathd);

	dm_unregister_target(&multipath_target);
	kmem_cache_destroy(_mpio_cache);
}

module_init(dm_multipath_init);
module_exit(dm_multipath_exit);

MODULE_DESCRIPTION(DM_NAME " multipath target");
MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
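
/*
 * Hedged usage sketch (not part of this file's API): a minimal
 * two-path table for this target, matching the constructor format
 * documented above multipath_status().  The device name, length and
 * path device numbers are hypothetical:
 *
 *	dmsetup create mpath0 --table \
 *	  "0 2097152 multipath 0 0 1 1 round-robin 0 2 1 8:16 1000 8:32 1000"
 */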