/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/smp_lock.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/delay.h>

#include <trace/events/block.h>

#define DM_MSG_PREFIX "core"

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_SPINLOCK(_minor_lock);
/*
 * For bio-based dm.
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	int error;
	atomic_t io_count;
	struct bio *bio;
	unsigned long start_time;
	spinlock_t endio_lock;
};

/*
 * For bio-based dm.
 * One of these is allocated per target within a bio. Hopefully
 * this will be simplified out one day.
 */
struct dm_target_io {
	struct dm_io *io;
	struct dm_target *ti;
	union map_info info;
};

/*
 * For request-based dm.
 * One of these is allocated per request.
 */
struct dm_rq_target_io {
	struct mapped_device *md;
	struct dm_target *ti;
	struct request *orig, clone;
	int error;
	union map_info info;
};

/*
 * For request-based dm.
 * One of these is allocated per bio.
 */
struct dm_rq_clone_bio_info {
	struct bio *orig;
	struct dm_rq_target_io *tio;
};

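/*
 * bio->bi_private of a clone issued by bio-based dm points at its
 * dm_target_io; return the embedded map_info, or NULL if there is none.
 */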
union map_info *dm_get_mapinfo(struct bio *bio)
{
	if (bio && bio->bi_private)
		return &((struct dm_target_io *)bio->bi_private)->info;
	return NULL;
}

union map_info *dm_get_rq_mapinfo(struct request *rq)
{
	if (rq && rq->end_io_data)
		return &((struct dm_rq_target_io *)rq->end_io_data)->info;
	return NULL;
}
EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);

#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_QUEUE_IO_TO_THREAD 6

/*
 * Work processed by per-device workqueue.
 */
struct mapped_device {
	struct rw_semaphore io_lock;
	struct mutex suspend_lock;
	rwlock_t map_lock;
	atomic_t holders;
	atomic_t open_count;

	unsigned long flags;

	struct request_queue *queue;
	unsigned type;
	/* Protect queue and type against concurrent access. */
	struct mutex type_lock;

	struct gendisk *disk;
	char name[16];

	void *interface_ptr;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	atomic_t pending[2];
	wait_queue_head_t wait;
	struct work_struct work;
	struct bio_list deferred;
	spinlock_t deferred_lock;

	/*
	 * An error from the barrier request currently being processed.
	 */
	int barrier_error;

	/*
	 * Protect barrier_error from concurrent endio processing
	 * in request-based dm.
	 */
	spinlock_t barrier_error_lock;

	/*
	 * Processing queue (flush/barriers)
	 */
	struct workqueue_struct *wq;
	struct work_struct barrier_work;

	/* A pointer to the currently processing pre/post flush request */
	struct request *flush_request;

	/*
	 * The current mapping.
	 */
	struct dm_table *map;

	/*
	 * io objects are allocated from here.
	 */
	mempool_t *io_pool;
	mempool_t *tio_pool;

	struct bio_set *bs;

	/*
	 * Event handling.
	 */
	atomic_t event_nr;
	wait_queue_head_t eventq;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/*
	 * freeze/thaw support requires holding onto a super block
	 */
	struct super_block *frozen_sb;
	struct block_device *bdev;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/* For saving the address of __make_request for request based dm */
	make_request_fn *saved_make_request_fn;

	/* sysfs handle */
	struct kobject kobj;

	/* zero-length barrier that will be cloned and submitted to targets */
	struct bio barrier_bio;
};

/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {
	mempool_t *io_pool;
	mempool_t *tio_pool;
	struct bio_set *bs;
};

#define MIN_IOS 256
static struct kmem_cache *_io_cache;
static struct kmem_cache *_tio_cache;
static struct kmem_cache *_rq_tio_cache;
static struct kmem_cache *_rq_bio_info_cache;

static int __init local_init(void)
{
	int r = -ENOMEM;

	/* allocate a slab for the dm_ios */
	_io_cache = KMEM_CACHE(dm_io, 0);
	if (!_io_cache)
		return r;

	/* allocate a slab for the target ios */
	_tio_cache = KMEM_CACHE(dm_target_io, 0);
	if (!_tio_cache)
		goto out_free_io_cache;

	_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
	if (!_rq_tio_cache)
		goto out_free_tio_cache;

	_rq_bio_info_cache = KMEM_CACHE(dm_rq_clone_bio_info, 0);
	if (!_rq_bio_info_cache)
		goto out_free_rq_tio_cache;

	r = dm_uevent_init();
	if (r)
		goto out_free_rq_bio_info_cache;

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_uevent_exit;

	if (!_major)
		_major = r;

	return 0;

out_uevent_exit:
	dm_uevent_exit();
out_free_rq_bio_info_cache:
	kmem_cache_destroy(_rq_bio_info_cache);
out_free_rq_tio_cache:
	kmem_cache_destroy(_rq_tio_cache);
out_free_tio_cache:
	kmem_cache_destroy(_tio_cache);
out_free_io_cache:
	kmem_cache_destroy(_io_cache);

	return r;
}

static void local_exit(void)
{
	kmem_cache_destroy(_rq_bio_info_cache);
	kmem_cache_destroy(_rq_tio_cache);
	kmem_cache_destroy(_tio_cache);
	kmem_cache_destroy(_io_cache);
	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_io_init,
	dm_kcopyd_init,
	dm_interface_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_io_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

      bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();
}

/*
 * Block device functions
 */
int dm_deleting_md(struct mapped_device *md)
{
	return test_bit(DMF_DELETING, &md->flags);
}

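/*
 * Open/close: take a reference on the mapped_device on open, unless the
 * device is currently being freed or deleted.
 */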
static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	lock_kernel();
	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);

out:
	spin_unlock(&_minor_lock);
	unlock_kernel();

	return md ? 0 : -ENXIO;
}

static int dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md = disk->private_data;

	lock_kernel();
	atomic_dec(&md->open_count);
	dm_put(md);
	unlock_kernel();

	return 0;
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md))
		r = -EBUSY;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	struct dm_table *map = dm_get_live_table(md);
	struct dm_target *tgt;
	int r = -ENOTTY;

	if (!map || !dm_table_get_size(map))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		goto out;

	tgt = dm_table_get_target(map, 0);

	if (dm_suspended_md(md)) {
		r = -EAGAIN;
		goto out;
	}

	if (tgt->type->ioctl)
		r = tgt->type->ioctl(tgt, cmd, arg);

out:
	dm_table_put(map);

	return r;
}

static struct dm_io *alloc_io(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	mempool_free(io, md->io_pool);
}

static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
{
	mempool_free(tio, md->tio_pool);
}

static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
					    gfp_t gfp_mask)
{
	return mempool_alloc(md->tio_pool, gfp_mask);
}

static void free_rq_tio(struct dm_rq_target_io *tio)
{
	mempool_free(tio, tio->md->tio_pool);
}

static struct dm_rq_clone_bio_info *alloc_bio_info(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_ATOMIC);
}

static void free_bio_info(struct dm_rq_clone_bio_info *info)
{
	mempool_free(info, info->tio->md->io_pool);
}

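/*
 * Number of I/Os currently in flight for this device (reads plus writes);
 * suspend waits on md->wait until this drops to zero.
 */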
static int md_in_flight(struct mapped_device *md)
{
	return atomic_read(&md->pending[READ]) +
	       atomic_read(&md->pending[WRITE]);
}

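/*
 * Per-bio accounting: start_io_acct()/end_io_acct() update the gendisk
 * statistics and the md->pending in-flight counters around each original bio.
 */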
static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	int cpu;
	int rw = bio_data_dir(io->bio);

	io->start_time = jiffies;

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_unlock();
	dm_disk(md)->part0.in_flight[rw] = atomic_inc_return(&md->pending[rw]);
}

static void end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	unsigned long duration = jiffies - io->start_time;
	int pending, cpu;
	int rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
	part_stat_unlock();

	/*
	 * After this is decremented the bio must not be touched if it is
	 * a barrier.
	 */
	dm_disk(md)->part0.in_flight[rw] = pending =
		atomic_dec_return(&md->pending[rw]);
	pending += atomic_read(&md->pending[rw^0x1]);

	/* nudge anyone waiting on suspend queue */
	if (!pending)
		wake_up(&md->wait);
}

/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
	down_write(&md->io_lock);

	spin_lock_irq(&md->deferred_lock);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irq(&md->deferred_lock);

	if (!test_and_set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags))
		queue_work(md->wq, &md->work);

	up_write(&md->io_lock);
}

/*
 * Everyone (including functions in this file) should use this
 * function to access the md->map field, and make sure they call
 * dm_table_put() when finished.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md)
{
	struct dm_table *t;
	unsigned long flags;

	read_lock_irqsave(&md->map_lock, flags);
	t = md->map;
	if (t)
		dm_table_get(t);
	read_unlock_irqrestore(&md->map_lock, flags);

	return t;
}

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

/*-----------------------------------------------------------------
 * CRUD START:
 * A more elegant soln is in the works that uses the queue
 * merge fn, unfortunately there are a couple of changes to
 * the block layer that I want to make for this. So in the
 * interests of getting something for people to use I give
 * you this clearly demarcated crap.
 *---------------------------------------------------------------*/

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static void dec_pending(struct dm_io *io, int error)
{
	unsigned long flags;
	int io_error;
	struct bio *bio;
	struct mapped_device *md = io->md;

	/* Push-back supersedes any I/O errors */
	if (unlikely(error)) {
		spin_lock_irqsave(&io->endio_lock, flags);
		if (!(io->error > 0 && __noflush_suspending(md)))
			io->error = error;
		spin_unlock_irqrestore(&io->endio_lock, flags);
	}

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->error == DM_ENDIO_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 */
			spin_lock_irqsave(&md->deferred_lock, flags);
			if (__noflush_suspending(md)) {
				if (!(io->bio->bi_rw & REQ_HARDBARRIER))
					bio_list_add_head(&md->deferred,
							  io->bio);
			} else
				/* noflush suspend was interrupted. */
				io->error = -EIO;
			spin_unlock_irqrestore(&md->deferred_lock, flags);
		}

		io_error = io->error;
		bio = io->bio;

		if (bio->bi_rw & REQ_HARDBARRIER) {
			/*
			 * There can be just one barrier request so we use
			 * a per-device variable for error reporting.
			 * Note that you can't touch the bio after end_io_acct
			 *
			 * We ignore -EOPNOTSUPP for empty flush reported by
			 * underlying devices. We assume that if the device
			 * doesn't support empty barriers, it doesn't need
			 * cache flushing commands.
			 */
			if (!md->barrier_error &&
			    !(bio_empty_barrier(bio) && io_error == -EOPNOTSUPP))
				md->barrier_error = io_error;
			end_io_acct(io);
			free_io(md, io);
		} else {
			end_io_acct(io);
			free_io(md, io);

			if (io_error != DM_ENDIO_REQUEUE) {
				trace_block_bio_complete(md->queue, bio);

				bio_endio(bio, io_error);
			}
		}
	}
}

static void clone_endio(struct bio *bio, int error)
{
	int r = 0;
	struct dm_target_io *tio = bio->bi_private;
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
		error = -EIO;

	if (endio) {
		r = endio(tio->ti, bio, error, &tio->info);
		if (r < 0 || r == DM_ENDIO_REQUEUE)
			/*
			 * error and requeue request are handled
			 * in dec_pending().
			 */
			error = r;
		else if (r == DM_ENDIO_INCOMPLETE)
			/* The target will handle the io */
			return;
		else if (r) {
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	/*
	 * Store md for cleanup instead of tio which is about to get freed.
	 */
	bio->bi_private = md->bs;

	free_tio(md, tio);
	bio_put(bio);
	dec_pending(io, error);
}

/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone, int error)
{
	struct dm_rq_clone_bio_info *info = clone->bi_private;
	struct dm_rq_target_io *tio = info->tio;
	struct bio *bio = info->orig;
	unsigned int nr_bytes = info->orig->bi_size;

	bio_put(clone);

	if (tio->error)
		/*
		 * An error has already been detected on the request.
		 * Once an error has occurred, just let clone->end_io() handle
		 * the remainder.
		 */
		return;
	else if (error) {
		/*
		 * Don't report the error to the upper layer yet.
		 * The error handling decision is made by the target driver,
		 * when the request is completed.
		 */
		tio->error = error;
		return;
	}

	/*
	 * I/O for the bio successfully completed.
	 * Report the data completion to the upper layer.
	 */

	/*
	 * bios are processed from the head of the list.
	 * So the completing bio should always be rq->bio.
	 * If it's not, something wrong is happening.
	 */
	if (tio->orig->bio != bio)
		DMERR("bio completion is going in the middle of the request");

	/*
	 * Update the original request.
	 * Do not use blk_end_request() here, because it may complete
	 * the original request before the clone, and break the ordering.
	 */
	blk_update_request(tio->orig, 0, nr_bytes);
}

static void store_barrier_error(struct mapped_device *md, int error)
{
	unsigned long flags;

	spin_lock_irqsave(&md->barrier_error_lock, flags);
	/*
	 * Basically, the first error is taken, but:
	 *   -EOPNOTSUPP supersedes any I/O error.
	 *   Requeue request supersedes any I/O error but -EOPNOTSUPP.
	 */
	if (!md->barrier_error || error == -EOPNOTSUPP ||
	    (md->barrier_error != -EOPNOTSUPP &&
	     error == DM_ENDIO_REQUEUE))
		md->barrier_error = error;
	spin_unlock_irqrestore(&md->barrier_error_lock, flags);
}

/*
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Or do dm_get() before calling this function and dm_put() later.
 */
static void rq_completed(struct mapped_device *md, int rw, int run_queue)
{
	atomic_dec(&md->pending[rw]);

	/* nudge anyone waiting on suspend queue */
	if (!md_in_flight(md))
		wake_up(&md->wait);

	if (run_queue)
		blk_run_queue(md->queue);

	/*
	 * dm_put() must be at the end of this function. See the comment above
	 */
	dm_put(md);
}

static void free_rq_clone(struct request *clone)
{
	struct dm_rq_target_io *tio = clone->end_io_data;

	blk_rq_unprep_clone(clone);
	free_rq_tio(tio);
}

/*
 * Complete the clone and the original request.
 * Must be called without queue lock.
 */
static void dm_end_request(struct request *clone, int error)
{
	int rw = rq_data_dir(clone);
	int run_queue = 1;
	bool is_barrier = clone->cmd_flags & REQ_HARDBARRIER;
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;

	if (rq->cmd_type == REQ_TYPE_BLOCK_PC && !is_barrier) {
		rq->errors = clone->errors;
		rq->resid_len = clone->resid_len;

		if (rq->sense)
			/*
			 * We are using the sense buffer of the original
			 * request.
			 * So setting the length of the sense data is enough.
			 */
			rq->sense_len = clone->sense_len;
	}

	free_rq_clone(clone);

	if (unlikely(is_barrier)) {
		if (unlikely(error))
			store_barrier_error(md, error);
		run_queue = 0;
	} else
		blk_end_request_all(rq, error);

	rq_completed(md, rw, run_queue);
}

static void dm_unprep_request(struct request *rq)
{
	struct request *clone = rq->special;

	rq->special = NULL;
	rq->cmd_flags &= ~REQ_DONTPREP;

	free_rq_clone(clone);
}

/*
 * Requeue the original request of a clone.
 */
void dm_requeue_unmapped_request(struct request *clone)
{
	int rw = rq_data_dir(clone);
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	struct request_queue *q = rq->q;
	unsigned long flags;

	if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) {
		/*
		 * Barrier clones share an original request.
		 * Leave it to dm_end_request(), which handles this special
		 * case.
		 */
		dm_end_request(clone, DM_ENDIO_REQUEUE);
		return;
	}

	dm_unprep_request(rq);

	spin_lock_irqsave(q->queue_lock, flags);
	if (elv_queue_empty(q))
		blk_plug_device(q);
	blk_requeue_request(q, rq);
	spin_unlock_irqrestore(q->queue_lock, flags);

	rq_completed(md, rw, 0);
}
EXPORT_SYMBOL_GPL(dm_requeue_unmapped_request);

static void __stop_queue(struct request_queue *q)
{
	blk_stop_queue(q);
}

static void stop_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__stop_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static void __start_queue(struct request_queue *q)
{
	if (blk_queue_stopped(q))
		blk_start_queue(q);
}

static void start_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

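/*
 * Common completion path for a clone request: give the target's rq_end_io
 * hook a chance to decide whether to complete, requeue or keep the I/O.
 */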
static void dm_done(struct request *clone, int error, bool mapped)
{
	int r = error;
	struct dm_rq_target_io *tio = clone->end_io_data;
	dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io;

	if (mapped && rq_end_io)
		r = rq_end_io(tio->ti, clone, error, &tio->info);

	if (r <= 0)
		/* The target wants to complete the I/O */
		dm_end_request(clone, r);
	else if (r == DM_ENDIO_INCOMPLETE)
		/* The target will handle the I/O */
		return;
	else if (r == DM_ENDIO_REQUEUE)
		/* The target wants to requeue the I/O */
		dm_requeue_unmapped_request(clone);
	else {
		DMWARN("unimplemented target endio return value: %d", r);
		BUG();
	}
}

/*
 * Request completion handler for request-based dm
 */
static void dm_softirq_done(struct request *rq)
{
	bool mapped = true;
	struct request *clone = rq->completion_data;
	struct dm_rq_target_io *tio = clone->end_io_data;

	if (rq->cmd_flags & REQ_FAILED)
		mapped = false;

	dm_done(clone, tio->error, mapped);
}

/*
 * Complete the clone and the original request with the error status
 * through softirq context.
 */
static void dm_complete_request(struct request *clone, int error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct request *rq = tio->orig;

	if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) {
		/*
		 * Barrier clones share an original request. So can't use
		 * softirq_done with the original.
		 * Pass the clone to dm_done() directly in this special case.
		 * It is safe (even if clone->q->queue_lock is held here)
		 * because there is no I/O dispatching during the completion
		 * of barrier clone.
		 */
		dm_done(clone, error, true);
		return;
	}

	tio->error = error;
	rq->completion_data = clone;
	blk_complete_request(rq);
}

/*
 * Complete the not-mapped clone and the original request with the error status
 * through softirq context.
 * Target's rq_end_io() function isn't called.
 * This may be used when the target's map_rq() function fails.
 */
void dm_kill_unmapped_request(struct request *clone, int error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct request *rq = tio->orig;

	if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) {
		/*
		 * Barrier clones share an original request.
		 * Leave it to dm_end_request(), which handles this special
		 * case.
		 */
		BUG_ON(error > 0);
		dm_end_request(clone, error);
		return;
	}

	rq->cmd_flags |= REQ_FAILED;
	dm_complete_request(clone, error);
}
EXPORT_SYMBOL_GPL(dm_kill_unmapped_request);

/*
 * Called with the queue lock held
 */
static void end_clone_request(struct request *clone, int error)
{
	/*
	 * For just cleaning up the information of the queue in which
	 * the clone was dispatched.
	 * The clone is *NOT* actually freed here because it is allocated from
	 * dm's own mempool and REQ_ALLOCED isn't set in clone->cmd_flags.
	 */
	__blk_put_request(clone->q, clone);

	/*
	 * Actual request completion is done in a softirq context which doesn't
	 * hold the queue lock. Otherwise, deadlock could occur because:
	 *   - another request may be submitted by the upper level driver
	 *     of the stacking during the completion
	 *   - the submission which requires queue lock may be done
	 *     against this queue
	 */
	dm_complete_request(clone, error);
}

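/*
 * Maximum number of sectors that may be issued from 'sector' without
 * crossing the end of the target or its split_io boundary.
 */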
static sector_t max_io_len(struct mapped_device *md,
			   sector_t sector, struct dm_target *ti)
{
	sector_t offset = sector - ti->begin;
	sector_t len = ti->len - offset;

	/*
	 * Does the target need to split even further?
	 */
	if (ti->split_io) {
		sector_t boundary;
		boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
			   - offset;
		if (len > boundary)
			len = boundary;
	}

	return len;
}

static void __map_bio(struct dm_target *ti, struct bio *clone,
		      struct dm_target_io *tio)
{
	int r;
	sector_t sector;
	struct mapped_device *md;

	clone->bi_end_io = clone_endio;
	clone->bi_private = tio;

	/*
	 * Map the clone. If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&tio->io->io_count);
	sector = clone->bi_sector;
	r = ti->type->map(ti, clone, &tio->info);
	if (r == DM_MAPIO_REMAPPED) {
		/* the bio has been remapped so dispatch it */

		trace_block_remap(bdev_get_queue(clone->bi_bdev), clone,
				  tio->io->bio->bi_bdev->bd_dev, sector);

		generic_make_request(clone);
	} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
		/* error the io and bail out, or requeue it if needed */
		md = tio->io->md;
		dec_pending(tio->io, r);
		/*
		 * Store bio_set for cleanup.
		 */
		clone->bi_private = md->bs;
		bio_put(clone);
		free_tio(md, tio);
	} else if (r) {
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}
}

struct clone_info {
	struct mapped_device *md;
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	sector_t sector_count;
	unsigned short idx;
};

static void dm_bio_destructor(struct bio *bio)
{
	struct bio_set *bs = bio->bi_private;

	bio_free(bio, bs);
}

/*
 * Creates a little bio that just does part of a bvec.
 */
static struct bio *split_bvec(struct bio *bio, sector_t sector,
			      unsigned short idx, unsigned int offset,
			      unsigned int len, struct bio_set *bs)
{
	struct bio *clone;
	struct bio_vec *bv = bio->bi_io_vec + idx;

	clone = bio_alloc_bioset(GFP_NOIO, 1, bs);
	clone->bi_destructor = dm_bio_destructor;
	*clone->bi_io_vec = *bv;

	clone->bi_sector = sector;
	clone->bi_bdev = bio->bi_bdev;
	clone->bi_rw = bio->bi_rw & ~REQ_HARDBARRIER;
	clone->bi_vcnt = 1;
	clone->bi_size = to_bytes(len);
	clone->bi_io_vec->bv_offset = offset;
	clone->bi_io_vec->bv_len = clone->bi_size;
	clone->bi_flags |= 1 << BIO_CLONED;

	if (bio_integrity(bio)) {
		bio_integrity_clone(clone, bio, GFP_NOIO, bs);
		bio_integrity_trim(clone,
				   bio_sector_offset(bio, idx, offset), len);
	}

	return clone;
}

/*
 * Creates a bio that consists of a range of complete bvecs.
 */
static struct bio *clone_bio(struct bio *bio, sector_t sector,
			     unsigned short idx, unsigned short bv_count,
			     unsigned int len, struct bio_set *bs)
{
	struct bio *clone;

	clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
	__bio_clone(clone, bio);
	clone->bi_rw &= ~REQ_HARDBARRIER;
	clone->bi_destructor = dm_bio_destructor;
	clone->bi_sector = sector;
	clone->bi_idx = idx;
	clone->bi_vcnt = idx + bv_count;
	clone->bi_size = to_bytes(len);
	clone->bi_flags &= ~(1 << BIO_SEG_VALID);

	if (bio_integrity(bio)) {
		bio_integrity_clone(clone, bio, GFP_NOIO, bs);

		if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
			bio_integrity_trim(clone,
					   bio_sector_offset(bio, idx, 0), len);
	}

	return clone;
}

static struct dm_target_io *alloc_tio(struct clone_info *ci,
				      struct dm_target *ti)
{
	struct dm_target_io *tio = mempool_alloc(ci->md->tio_pool, GFP_NOIO);

	tio->io = ci->io;
	tio->ti = ti;
	memset(&tio->info, 0, sizeof(tio->info));

	return tio;
}

static void __flush_target(struct clone_info *ci, struct dm_target *ti,
			   unsigned request_nr)
{
	struct dm_target_io *tio = alloc_tio(ci, ti);
	struct bio *clone;

	tio->info.target_request_nr = request_nr;

	clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs);
	__bio_clone(clone, ci->bio);
	clone->bi_destructor = dm_bio_destructor;

	__map_bio(ti, clone, tio);
}

static int __clone_and_map_empty_barrier(struct clone_info *ci)
{
	unsigned target_nr = 0, request_nr;
	struct dm_target *ti;

	while ((ti = dm_table_get_target(ci->map, target_nr++)))
		for (request_nr = 0; request_nr < ti->num_flush_requests;
		     request_nr++)
			__flush_target(ci, ti, request_nr);

	ci->sector_count = 0;

	return 0;
}

/*
 * Perform all io with a single clone.
 */
static void __clone_and_map_simple(struct clone_info *ci, struct dm_target *ti)
{
	struct bio *clone, *bio = ci->bio;
	struct dm_target_io *tio;

	tio = alloc_tio(ci, ti);
	clone = clone_bio(bio, ci->sector, ci->idx,
			  bio->bi_vcnt - ci->idx, ci->sector_count,
			  ci->md->bs);
	__map_bio(ti, clone, tio);
	ci->sector_count = 0;
}

static int __clone_and_map_discard(struct clone_info *ci)
{
	struct dm_target *ti;
	sector_t max;

	ti = dm_table_find_target(ci->map, ci->sector);
	if (!dm_target_is_valid(ti))
		return -EIO;

	/*
	 * Even though the device advertised discard support,
	 * reconfiguration might have changed that since the
	 * check was performed.
	 */

	if (!ti->num_discard_requests)
		return -EOPNOTSUPP;

	max = max_io_len(ci->md, ci->sector, ti);

	if (ci->sector_count > max)
		/*
		 * FIXME: Handle a discard that spans two or more targets.
		 */
		return -EOPNOTSUPP;

	__clone_and_map_simple(ci, ti);

	return 0;
}

static int __clone_and_map(struct clone_info *ci)
{
	struct bio *clone, *bio = ci->bio;
	struct dm_target *ti;
	sector_t len = 0, max;
	struct dm_target_io *tio;

	if (unlikely(bio_empty_barrier(bio)))
		return __clone_and_map_empty_barrier(ci);

	if (unlikely(bio->bi_rw & REQ_DISCARD))
		return __clone_and_map_discard(ci);

	ti = dm_table_find_target(ci->map, ci->sector);
	if (!dm_target_is_valid(ti))
		return -EIO;

	max = max_io_len(ci->md, ci->sector, ti);

	if (ci->sector_count <= max) {
		/*
		 * Optimise for the simple case where we can do all of
		 * the remaining io with a single clone.
		 */
		__clone_and_map_simple(ci, ti);

	} else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
		/*
		 * There are some bvecs that don't span targets.
		 * Do as many of these as possible.
		 */
		int i;
		sector_t remaining = max;
		sector_t bv_len;

		for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
			bv_len = to_sector(bio->bi_io_vec[i].bv_len);

			if (bv_len > remaining)
				break;

			remaining -= bv_len;
			len += bv_len;
		}

		tio = alloc_tio(ci, ti);
		clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len,
				  ci->md->bs);
		__map_bio(ti, clone, tio);

		ci->sector += len;
		ci->sector_count -= len;
		ci->idx = i;

	} else {
		/*
		 * Handle a bvec that must be split between two or more targets.
		 */
		struct bio_vec *bv = bio->bi_io_vec + ci->idx;
		sector_t remaining = to_sector(bv->bv_len);
		unsigned int offset = 0;

		do {
			if (offset) {
				ti = dm_table_find_target(ci->map, ci->sector);
				if (!dm_target_is_valid(ti))
					return -EIO;

				max = max_io_len(ci->md, ci->sector, ti);
			}

			len = min(remaining, max);

			tio = alloc_tio(ci, ti);
			clone = split_bvec(bio, ci->sector, ci->idx,
					   bv->bv_offset + offset, len,
					   ci->md->bs);

			__map_bio(ti, clone, tio);

			ci->sector += len;
			ci->sector_count -= len;
			offset += to_bytes(len);
		} while (remaining -= len);

		ci->idx++;
	}

	return 0;
}

/*
 * Split the bio into several clones and submit it to targets.
 */
static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
{
	struct clone_info ci;
	int error = 0;

	ci.map = dm_get_live_table(md);
	if (unlikely(!ci.map)) {
		if (!(bio->bi_rw & REQ_HARDBARRIER))
			bio_io_error(bio);
		else
			if (!md->barrier_error)
				md->barrier_error = -EIO;
		return;
	}

	ci.md = md;
	ci.bio = bio;
	ci.io = alloc_io(md);
	ci.io->error = 0;
	atomic_set(&ci.io->io_count, 1);
	ci.io->bio = bio;
	ci.io->md = md;
	spin_lock_init(&ci.io->endio_lock);
	ci.sector = bio->bi_sector;
	ci.sector_count = bio_sectors(bio);
	if (unlikely(bio_empty_barrier(bio)))
		ci.sector_count = 1;
	ci.idx = bio->bi_idx;

	start_io_acct(ci.io);
	while (ci.sector_count && !error)
		error = __clone_and_map(&ci);

	/* drop the extra reference count */
	dec_pending(ci.io, error);
	dm_table_put(ci.map);
}
/*-----------------------------------------------------------------
 * CRUD END
 *---------------------------------------------------------------*/

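/*
 * merge_bvec_fn for the mapped device: report how many bytes can be added
 * to a bio at this offset without forcing a split later on.
 */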
static int dm_merge_bvec(struct request_queue *q,
			 struct bvec_merge_data *bvm,
			 struct bio_vec *biovec)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_live_table(md);
	struct dm_target *ti;
	sector_t max_sectors;
	int max_size = 0;

	if (unlikely(!map))
		goto out;

	ti = dm_table_find_target(map, bvm->bi_sector);
	if (!dm_target_is_valid(ti))
		goto out_table;

	/*
	 * Find maximum amount of I/O that won't need splitting
	 */
	max_sectors = min(max_io_len(md, bvm->bi_sector, ti),
			  (sector_t) BIO_MAX_SECTORS);
	max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
	if (max_size < 0)
		max_size = 0;

	/*
	 * merge_bvec_fn() returns number of bytes
	 * it can accept at this offset
	 * max is precomputed maximal io size
	 */
	if (max_size && ti->type->merge)
		max_size = ti->type->merge(ti, bvm, biovec, max_size);
	/*
	 * If the target doesn't support merge method and some of the devices
	 * provided their merge_bvec method (we know this by looking at
	 * queue_max_hw_sectors), then we can't allow bios with multiple vector
	 * entries. So always set max_size to 0, and the code below allows
	 * just one page.
	 */
	else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)

		max_size = 0;

out_table:
	dm_table_put(map);

out:
	/*
	 * Always allow an entire first page
	 */
	if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
		max_size = biovec->bv_len;

	return max_size;
}

Linus Torvalds1da177e2005-04-16 15:20:36 -07001454/*
1455 * The request function that just remaps the bio built up by
1456 * dm_merge_bvec.
1457 */
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001458static int _dm_request(struct request_queue *q, struct bio *bio)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001459{
Kevin Corry12f03a42006-02-01 03:04:52 -08001460 int rw = bio_data_dir(bio);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001461 struct mapped_device *md = q->queuedata;
Tejun Heoc9959052008-08-25 19:47:21 +09001462 int cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001463
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07001464 down_read(&md->io_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001465
Tejun Heo074a7ac2008-08-25 19:56:14 +09001466 cpu = part_stat_lock();
1467 part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
1468 part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
1469 part_stat_unlock();
Kevin Corry12f03a42006-02-01 03:04:52 -08001470
Linus Torvalds1da177e2005-04-16 15:20:36 -07001471 /*
Alasdair G Kergon1eb787e2009-04-09 00:27:14 +01001472 * If we're suspended or the thread is processing barriers,
1473 * we have to queue this I/O for later.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001474 */
Mikulas Patockaaf7e4662009-04-09 00:27:16 +01001475 if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) ||
Christoph Hellwig7b6d91d2010-08-07 18:20:39 +02001476 unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07001477 up_read(&md->io_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001478
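		/*
		 * Readahead is best-effort; presumably that is why a READA bio
		 * is failed immediately below rather than deferred for the
		 * duration of the suspend.
		 */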
Alasdair G Kergon54d9a1b2009-04-09 00:27:14 +01001479 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) &&
1480 bio_rw(bio) == READA) {
1481 bio_io_error(bio);
1482 return 0;
1483 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001484
Mikulas Patocka92c63902009-04-09 00:27:15 +01001485 queue_io(md, bio);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001486
Mikulas Patocka92c63902009-04-09 00:27:15 +01001487 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001488 }
1489
Mikulas Patockaf0b9a452009-04-02 19:55:38 +01001490 __split_and_process_bio(md, bio);
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07001491 up_read(&md->io_lock);
Mikulas Patockaf0b9a452009-04-02 19:55:38 +01001492 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001493}
1494
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001495static int dm_make_request(struct request_queue *q, struct bio *bio)
1496{
1497 struct mapped_device *md = q->queuedata;
1498
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001499 return md->saved_make_request_fn(q, bio); /* call __make_request() */
1500}
1501
1502static int dm_request_based(struct mapped_device *md)
1503{
1504 return blk_queue_stackable(md->queue);
1505}
1506
1507static int dm_request(struct request_queue *q, struct bio *bio)
1508{
1509 struct mapped_device *md = q->queuedata;
1510
1511 if (dm_request_based(md))
1512 return dm_make_request(q, bio);
1513
1514 return _dm_request(q, bio);
1515}
1516
Kiyoshi Uedad0bcb872009-12-10 23:52:18 +00001517static bool dm_rq_is_flush_request(struct request *rq)
1518{
FUJITA Tomonori144d6ed2010-07-03 17:45:37 +09001519 if (rq->cmd_flags & REQ_FLUSH)
Kiyoshi Uedad0bcb872009-12-10 23:52:18 +00001520 return true;
1521 else
1522 return false;
1523}
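/*
 * Note: request-based dm intercepts REQ_FLUSH requests in dm_prep_fn() and
 * dm_request_fn() below and routes them through md->barrier_work instead of
 * the normal clone-and-map path.
 */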
1524
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001525void dm_dispatch_request(struct request *rq)
1526{
1527 int r;
1528
1529 if (blk_queue_io_stat(rq->q))
1530 rq->cmd_flags |= REQ_IO_STAT;
1531
1532 rq->start_time = jiffies;
1533 r = blk_insert_cloned_request(rq->q, rq);
1534 if (r)
1535 dm_complete_request(rq, r);
1536}
1537EXPORT_SYMBOL_GPL(dm_dispatch_request);
1538
1539static void dm_rq_bio_destructor(struct bio *bio)
1540{
1541 struct dm_rq_clone_bio_info *info = bio->bi_private;
1542 struct mapped_device *md = info->tio->md;
1543
1544 free_bio_info(info);
1545 bio_free(bio, md->bs);
1546}
1547
1548static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
1549 void *data)
1550{
1551 struct dm_rq_target_io *tio = data;
1552 struct mapped_device *md = tio->md;
1553 struct dm_rq_clone_bio_info *info = alloc_bio_info(md);
1554
1555 if (!info)
1556 return -ENOMEM;
1557
1558 info->orig = bio_orig;
1559 info->tio = tio;
1560 bio->bi_end_io = end_clone_bio;
1561 bio->bi_private = info;
1562 bio->bi_destructor = dm_rq_bio_destructor;
1563
1564 return 0;
1565}
1566
1567static int setup_clone(struct request *clone, struct request *rq,
1568 struct dm_rq_target_io *tio)
1569{
Kiyoshi Uedad0bcb872009-12-10 23:52:18 +00001570 int r;
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001571
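	/*
	 * A flush request carries no data, so the clone is presumably built
	 * from scratch as an empty barrier write below instead of having its
	 * bios cloned with blk_rq_prep_clone().
	 */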
Kiyoshi Uedad0bcb872009-12-10 23:52:18 +00001572 if (dm_rq_is_flush_request(rq)) {
1573 blk_rq_init(NULL, clone);
1574 clone->cmd_type = REQ_TYPE_FS;
1575 clone->cmd_flags |= (REQ_HARDBARRIER | WRITE);
1576 } else {
1577 r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC,
1578 dm_rq_bio_constructor, tio);
1579 if (r)
1580 return r;
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001581
Kiyoshi Uedad0bcb872009-12-10 23:52:18 +00001582 clone->cmd = rq->cmd;
1583 clone->cmd_len = rq->cmd_len;
1584 clone->sense = rq->sense;
1585 clone->buffer = rq->buffer;
1586 }
1587
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001588 clone->end_io = end_clone_request;
1589 clone->end_io_data = tio;
1590
1591 return 0;
1592}
1593
Kiyoshi Ueda6facdaf2009-12-10 23:52:15 +00001594static struct request *clone_rq(struct request *rq, struct mapped_device *md,
1595 gfp_t gfp_mask)
1596{
1597 struct request *clone;
1598 struct dm_rq_target_io *tio;
1599
1600 tio = alloc_rq_tio(md, gfp_mask);
1601 if (!tio)
1602 return NULL;
1603
1604 tio->md = md;
1605 tio->ti = NULL;
1606 tio->orig = rq;
1607 tio->error = 0;
1608 memset(&tio->info, 0, sizeof(tio->info));
1609
1610 clone = &tio->clone;
1611 if (setup_clone(clone, rq, tio)) {
1612 /* -ENOMEM */
1613 free_rq_tio(tio);
1614 return NULL;
1615 }
1616
1617 return clone;
1618}
1619
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001620/*
1621 * Called with the queue lock held.
1622 */
1623static int dm_prep_fn(struct request_queue *q, struct request *rq)
1624{
1625 struct mapped_device *md = q->queuedata;
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001626 struct request *clone;
1627
Kiyoshi Uedad0bcb872009-12-10 23:52:18 +00001628 if (unlikely(dm_rq_is_flush_request(rq)))
1629 return BLKPREP_OK;
1630
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001631 if (unlikely(rq->special)) {
1632 DMWARN("Already has something in rq->special.");
1633 return BLKPREP_KILL;
1634 }
1635
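	/*
	 * GFP_ATOMIC because the queue lock is held here; if the allocation
	 * fails, BLKPREP_DEFER tells the block layer to retry this request
	 * later.
	 */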
Kiyoshi Ueda6facdaf2009-12-10 23:52:15 +00001636 clone = clone_rq(rq, md, GFP_ATOMIC);
1637 if (!clone)
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001638 return BLKPREP_DEFER;
1639
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001640 rq->special = clone;
1641 rq->cmd_flags |= REQ_DONTPREP;
1642
1643 return BLKPREP_OK;
1644}
1645
Kiyoshi Ueda9eef87d2010-02-16 18:43:01 +00001646/*
1647 * Returns:
1648 * 0 : the request has been processed (not requeued)
1649 * !0 : the request has been requeued
1650 */
1651static int map_request(struct dm_target *ti, struct request *clone,
1652 struct mapped_device *md)
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001653{
Kiyoshi Ueda9eef87d2010-02-16 18:43:01 +00001654 int r, requeued = 0;
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001655 struct dm_rq_target_io *tio = clone->end_io_data;
1656
1657 /*
1658 * Hold the md reference here for the in-flight I/O.
1659 * We can't rely on the reference count held by the device opener,
1660 * because the device may be closed while the request is completing,
1661 * after all of its bios have finished.
1662 * See the comment in rq_completed() too.
1663 */
1664 dm_get(md);
1665
1666 tio->ti = ti;
1667 r = ti->type->map_rq(ti, clone, &tio->info);
1668 switch (r) {
1669 case DM_MAPIO_SUBMITTED:
1670 /* The target has taken the I/O and will submit it by itself later */
1671 break;
1672 case DM_MAPIO_REMAPPED:
1673 /* The target has remapped the I/O so dispatch it */
Jun'ichi Nomura6db4ccd2009-12-10 23:52:25 +00001674 trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
1675 blk_rq_pos(tio->orig));
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001676 dm_dispatch_request(clone);
1677 break;
1678 case DM_MAPIO_REQUEUE:
1679 /* The target wants to requeue the I/O */
1680 dm_requeue_unmapped_request(clone);
Kiyoshi Ueda9eef87d2010-02-16 18:43:01 +00001681 requeued = 1;
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001682 break;
1683 default:
1684 if (r > 0) {
1685 DMWARN("unimplemented target map return value: %d", r);
1686 BUG();
1687 }
1688
1689 /* The target wants to complete the I/O */
1690 dm_kill_unmapped_request(clone, r);
1691 break;
1692 }
Kiyoshi Ueda9eef87d2010-02-16 18:43:01 +00001693
1694 return requeued;
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001695}
1696
1697/*
1698 * q->request_fn for request-based dm.
1699 * Called with the queue lock held.
1700 */
1701static void dm_request_fn(struct request_queue *q)
1702{
1703 struct mapped_device *md = q->queuedata;
Alasdair G Kergon7c666412009-12-10 23:52:19 +00001704 struct dm_table *map = dm_get_live_table(md);
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001705 struct dm_target *ti;
Kiyoshi Uedab4324fe2009-12-10 23:52:16 +00001706 struct request *rq, *clone;
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001707
1708 /*
Kiyoshi Uedab4324fe2009-12-10 23:52:16 +00001709 * For suspend, check blk_queue_stopped() and increment
1710 * ->pending within a single queue_lock, so that the number of
1711 * in-flight I/Os is never incremented after the queue has been
1712 * stopped in dm_suspend().
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001713 */
1714 while (!blk_queue_plugged(q) && !blk_queue_stopped(q)) {
1715 rq = blk_peek_request(q);
1716 if (!rq)
1717 goto plug_and_out;
1718
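		/*
		 * Flush/barrier requests are handed off to md->wq: the barrier
		 * work has to wait for per-target flushes to complete, which
		 * cannot be done here under the queue lock.
		 */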
Kiyoshi Uedad0bcb872009-12-10 23:52:18 +00001719 if (unlikely(dm_rq_is_flush_request(rq))) {
1720 BUG_ON(md->flush_request);
1721 md->flush_request = rq;
1722 blk_start_request(rq);
1723 queue_work(md->wq, &md->barrier_work);
1724 goto out;
1725 }
1726
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001727 ti = dm_table_find_target(map, blk_rq_pos(rq));
1728 if (ti->type->busy && ti->type->busy(ti))
1729 goto plug_and_out;
1730
1731 blk_start_request(rq);
Kiyoshi Uedab4324fe2009-12-10 23:52:16 +00001732 clone = rq->special;
1733 atomic_inc(&md->pending[rq_data_dir(clone)]);
1734
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001735 spin_unlock(q->queue_lock);
Kiyoshi Ueda9eef87d2010-02-16 18:43:01 +00001736 if (map_request(ti, clone, md))
1737 goto requeued;
1738
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001739 spin_lock_irq(q->queue_lock);
1740 }
1741
1742 goto out;
1743
Kiyoshi Ueda9eef87d2010-02-16 18:43:01 +00001744requeued:
1745 spin_lock_irq(q->queue_lock);
1746
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001747plug_and_out:
1748 if (!elv_queue_empty(q))
1749 /* Some requests still remain, retry later */
1750 blk_plug_device(q);
1751
1752out:
1753 dm_table_put(map);
1754
1755 return;
1756}
1757
1758int dm_underlying_device_busy(struct request_queue *q)
1759{
1760 return blk_lld_busy(q);
1761}
1762EXPORT_SYMBOL_GPL(dm_underlying_device_busy);
1763
1764static int dm_lld_busy(struct request_queue *q)
1765{
1766 int r;
1767 struct mapped_device *md = q->queuedata;
Alasdair G Kergon7c666412009-12-10 23:52:19 +00001768 struct dm_table *map = dm_get_live_table(md);
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001769
1770 if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))
1771 r = 1;
1772 else
1773 r = dm_table_any_busy_target(map);
1774
1775 dm_table_put(map);
1776
1777 return r;
1778}
1779
Jens Axboe165125e2007-07-24 09:28:11 +02001780static void dm_unplug_all(struct request_queue *q)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001781{
1782 struct mapped_device *md = q->queuedata;
Alasdair G Kergon7c666412009-12-10 23:52:19 +00001783 struct dm_table *map = dm_get_live_table(md);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001784
1785 if (map) {
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001786 if (dm_request_based(md))
1787 generic_unplug_device(q);
1788
Linus Torvalds1da177e2005-04-16 15:20:36 -07001789 dm_table_unplug_all(map);
1790 dm_table_put(map);
1791 }
1792}
1793
1794static int dm_any_congested(void *congested_data, int bdi_bits)
1795{
Chandra Seetharaman8a57dfc2008-11-13 23:39:14 +00001796 int r = bdi_bits;
1797 struct mapped_device *md = congested_data;
1798 struct dm_table *map;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001799
Alasdair G Kergon1eb787e2009-04-09 00:27:14 +01001800 if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
Alasdair G Kergon7c666412009-12-10 23:52:19 +00001801 map = dm_get_live_table(md);
Chandra Seetharaman8a57dfc2008-11-13 23:39:14 +00001802 if (map) {
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01001803 /*
1804 * Request-based dm cares only about its own queue when
1805 * queried for the congestion status of the request_queue.
1806 */
1807 if (dm_request_based(md))
1808 r = md->queue->backing_dev_info.state &
1809 bdi_bits;
1810 else
1811 r = dm_table_any_congested(map, bdi_bits);
1812
Chandra Seetharaman8a57dfc2008-11-13 23:39:14 +00001813 dm_table_put(map);
1814 }
1815 }
1816
Linus Torvalds1da177e2005-04-16 15:20:36 -07001817 return r;
1818}
1819
1820/*-----------------------------------------------------------------
1821 * An IDR is used to keep track of allocated minor numbers.
1822 *---------------------------------------------------------------*/
Linus Torvalds1da177e2005-04-16 15:20:36 -07001823static DEFINE_IDR(_minor_idr);
1824
Alasdair G Kergon2b06cff2006-06-26 00:27:32 -07001825static void free_minor(int minor)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001826{
Jeff Mahoneyf32c10b2006-06-26 00:27:22 -07001827 spin_lock(&_minor_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001828 idr_remove(&_minor_idr, minor);
Jeff Mahoneyf32c10b2006-06-26 00:27:22 -07001829 spin_unlock(&_minor_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001830}
1831
1832/*
1833 * See if the device with a specific minor # is free.
1834 */
Frederik Deweerdtcf13ab82008-04-24 22:10:59 +01001835static int specific_minor(int minor)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001836{
1837 int r, m;
1838
1839 if (minor >= (1 << MINORBITS))
1840 return -EINVAL;
1841
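	/*
	 * Old two-step IDR protocol: idr_pre_get() preallocates memory
	 * outside the spinlock, then idr_get_new_above() consumes it while
	 * _minor_lock is held.
	 */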
Jeff Mahoney62f75c22006-06-26 00:27:21 -07001842 r = idr_pre_get(&_minor_idr, GFP_KERNEL);
1843 if (!r)
1844 return -ENOMEM;
1845
Jeff Mahoneyf32c10b2006-06-26 00:27:22 -07001846 spin_lock(&_minor_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001847
1848 if (idr_find(&_minor_idr, minor)) {
1849 r = -EBUSY;
1850 goto out;
1851 }
1852
Jeff Mahoneyba61fdd2006-06-26 00:27:21 -07001853 r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m);
Jeff Mahoney62f75c22006-06-26 00:27:21 -07001854 if (r)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001855 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001856
1857 if (m != minor) {
1858 idr_remove(&_minor_idr, m);
1859 r = -EBUSY;
1860 goto out;
1861 }
1862
1863out:
Jeff Mahoneyf32c10b2006-06-26 00:27:22 -07001864 spin_unlock(&_minor_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001865 return r;
1866}
1867
Frederik Deweerdtcf13ab82008-04-24 22:10:59 +01001868static int next_free_minor(int *minor)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001869{
Alasdair G Kergon2b06cff2006-06-26 00:27:32 -07001870 int r, m;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001871
Linus Torvalds1da177e2005-04-16 15:20:36 -07001872 r = idr_pre_get(&_minor_idr, GFP_KERNEL);
Jeff Mahoney62f75c22006-06-26 00:27:21 -07001873 if (!r)
1874 return -ENOMEM;
1875
Jeff Mahoneyf32c10b2006-06-26 00:27:22 -07001876 spin_lock(&_minor_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001877
Jeff Mahoneyba61fdd2006-06-26 00:27:21 -07001878 r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
Frederik Deweerdtcf13ab82008-04-24 22:10:59 +01001879 if (r)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001880 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001881
1882 if (m >= (1 << MINORBITS)) {
1883 idr_remove(&_minor_idr, m);
1884 r = -ENOSPC;
1885 goto out;
1886 }
1887
1888 *minor = m;
1889
1890out:
Jeff Mahoneyf32c10b2006-06-26 00:27:22 -07001891 spin_unlock(&_minor_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001892 return r;
1893}
1894
Alexey Dobriyan83d5cde2009-09-21 17:01:13 -07001895static const struct block_device_operations dm_blk_dops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001896
Mikulas Patocka53d59142009-04-02 19:55:37 +01001897static void dm_wq_work(struct work_struct *work);
Kiyoshi Uedad0bcb872009-12-10 23:52:18 +00001898static void dm_rq_barrier_work(struct work_struct *work);
Mikulas Patocka53d59142009-04-02 19:55:37 +01001899
Mike Snitzer4a0b4dd2010-08-12 04:14:02 +01001900static void dm_init_md_queue(struct mapped_device *md)
1901{
1902 /*
1903 * Request-based dm devices cannot be stacked on top of bio-based dm
1904 * devices. The type of this dm device has not been decided yet.
1905 * The type is decided at the first table loading time.
1906 * To prevent problematic device stacking, clear the queue flag
1907 * for request stacking support until then.
1908 *
1909 * This queue is new, so no concurrency on the queue_flags.
1910 */
1911 queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
1912
1913 md->queue->queuedata = md;
1914 md->queue->backing_dev_info.congested_fn = dm_any_congested;
1915 md->queue->backing_dev_info.congested_data = md;
1916 blk_queue_make_request(md->queue, dm_request);
1917 blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
1918 md->queue->unplug_fn = dm_unplug_all;
1919 blk_queue_merge_bvec(md->queue, dm_merge_bvec);
1920}
1921
Linus Torvalds1da177e2005-04-16 15:20:36 -07001922/*
1923 * Allocate and initialise a blank device with a given minor.
1924 */
Alasdair G Kergon2b06cff2006-06-26 00:27:32 -07001925static struct mapped_device *alloc_dev(int minor)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001926{
1927 int r;
Frederik Deweerdtcf13ab82008-04-24 22:10:59 +01001928 struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
Jeff Mahoneyba61fdd2006-06-26 00:27:21 -07001929 void *old_md;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001930
1931 if (!md) {
1932 DMWARN("unable to allocate device, out of memory.");
1933 return NULL;
1934 }
1935
Jeff Mahoney10da4f72006-06-26 00:27:25 -07001936 if (!try_module_get(THIS_MODULE))
Milan Broz6ed7ade2008-02-08 02:10:19 +00001937 goto bad_module_get;
Jeff Mahoney10da4f72006-06-26 00:27:25 -07001938
Linus Torvalds1da177e2005-04-16 15:20:36 -07001939 /* get a minor number for the dev */
Alasdair G Kergon2b06cff2006-06-26 00:27:32 -07001940 if (minor == DM_ANY_MINOR)
Frederik Deweerdtcf13ab82008-04-24 22:10:59 +01001941 r = next_free_minor(&minor);
Alasdair G Kergon2b06cff2006-06-26 00:27:32 -07001942 else
Frederik Deweerdtcf13ab82008-04-24 22:10:59 +01001943 r = specific_minor(minor);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001944 if (r < 0)
Milan Broz6ed7ade2008-02-08 02:10:19 +00001945 goto bad_minor;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001946
Mike Snitzera5664da2010-08-12 04:14:01 +01001947 md->type = DM_TYPE_NONE;
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07001948 init_rwsem(&md->io_lock);
Daniel Walkere61290a2008-02-08 02:10:08 +00001949 mutex_init(&md->suspend_lock);
Mike Snitzera5664da2010-08-12 04:14:01 +01001950 mutex_init(&md->type_lock);
Mikulas Patocka022c2612009-04-02 19:55:39 +01001951 spin_lock_init(&md->deferred_lock);
Kiyoshi Uedad0bcb872009-12-10 23:52:18 +00001952 spin_lock_init(&md->barrier_error_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001953 rwlock_init(&md->map_lock);
1954 atomic_set(&md->holders, 1);
Alasdair G Kergon5c6bd752006-06-26 00:27:34 -07001955 atomic_set(&md->open_count, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001956 atomic_set(&md->event_nr, 0);
Mike Anderson7a8c3d32007-10-19 22:48:01 +01001957 atomic_set(&md->uevent_seq, 0);
1958 INIT_LIST_HEAD(&md->uevent_list);
1959 spin_lock_init(&md->uevent_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001960
Mike Snitzer4a0b4dd2010-08-12 04:14:02 +01001961 md->queue = blk_alloc_queue(GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001962 if (!md->queue)
Milan Broz6ed7ade2008-02-08 02:10:19 +00001963 goto bad_queue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001964
Mike Snitzer4a0b4dd2010-08-12 04:14:02 +01001965 dm_init_md_queue(md);
Stefan Bader9faf4002006-10-03 01:15:41 -07001966
Linus Torvalds1da177e2005-04-16 15:20:36 -07001967 md->disk = alloc_disk(1);
1968 if (!md->disk)
Milan Broz6ed7ade2008-02-08 02:10:19 +00001969 goto bad_disk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001970
Nikanth Karthikesan316d3152009-10-06 20:16:55 +02001971 atomic_set(&md->pending[0], 0);
1972 atomic_set(&md->pending[1], 0);
Jeff Mahoneyf0b04112006-06-26 00:27:25 -07001973 init_waitqueue_head(&md->wait);
Mikulas Patocka53d59142009-04-02 19:55:37 +01001974 INIT_WORK(&md->work, dm_wq_work);
Kiyoshi Uedad0bcb872009-12-10 23:52:18 +00001975 INIT_WORK(&md->barrier_work, dm_rq_barrier_work);
Jeff Mahoneyf0b04112006-06-26 00:27:25 -07001976 init_waitqueue_head(&md->eventq);
1977
Linus Torvalds1da177e2005-04-16 15:20:36 -07001978 md->disk->major = _major;
1979 md->disk->first_minor = minor;
1980 md->disk->fops = &dm_blk_dops;
1981 md->disk->queue = md->queue;
1982 md->disk->private_data = md;
1983 sprintf(md->disk->disk_name, "dm-%d", minor);
1984 add_disk(md->disk);
Mike Anderson7e51f252006-03-27 01:17:52 -08001985 format_dev_t(md->name, MKDEV(_major, minor));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001986
Milan Broz304f3f62008-02-08 02:11:17 +00001987 md->wq = create_singlethread_workqueue("kdmflush");
1988 if (!md->wq)
1989 goto bad_thread;
1990
Mikulas Patocka32a926d2009-06-22 10:12:17 +01001991 md->bdev = bdget_disk(md->disk, 0);
1992 if (!md->bdev)
1993 goto bad_bdev;
1994
Jeff Mahoneyba61fdd2006-06-26 00:27:21 -07001995 /* Populate the mapping, nobody knows we exist yet */
Jeff Mahoneyf32c10b2006-06-26 00:27:22 -07001996 spin_lock(&_minor_lock);
Jeff Mahoneyba61fdd2006-06-26 00:27:21 -07001997 old_md = idr_replace(&_minor_idr, md, minor);
Jeff Mahoneyf32c10b2006-06-26 00:27:22 -07001998 spin_unlock(&_minor_lock);
Jeff Mahoneyba61fdd2006-06-26 00:27:21 -07001999
2000 BUG_ON(old_md != MINOR_ALLOCED);
2001
Linus Torvalds1da177e2005-04-16 15:20:36 -07002002 return md;
2003
Mikulas Patocka32a926d2009-06-22 10:12:17 +01002004bad_bdev:
2005 destroy_workqueue(md->wq);
Milan Broz304f3f62008-02-08 02:11:17 +00002006bad_thread:
Zdenek Kabelac03022c52009-10-16 23:18:15 +01002007 del_gendisk(md->disk);
Milan Broz304f3f62008-02-08 02:11:17 +00002008 put_disk(md->disk);
Milan Broz6ed7ade2008-02-08 02:10:19 +00002009bad_disk:
Al Viro1312f402006-03-12 11:02:03 -05002010 blk_cleanup_queue(md->queue);
Milan Broz6ed7ade2008-02-08 02:10:19 +00002011bad_queue:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002012 free_minor(minor);
Milan Broz6ed7ade2008-02-08 02:10:19 +00002013bad_minor:
Jeff Mahoney10da4f72006-06-26 00:27:25 -07002014 module_put(THIS_MODULE);
Milan Broz6ed7ade2008-02-08 02:10:19 +00002015bad_module_get:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002016 kfree(md);
2017 return NULL;
2018}
2019
Jun'ichi Nomuraae9da832007-10-19 22:38:43 +01002020static void unlock_fs(struct mapped_device *md);
2021
Linus Torvalds1da177e2005-04-16 15:20:36 -07002022static void free_dev(struct mapped_device *md)
2023{
Tejun Heof331c022008-09-03 09:01:48 +02002024 int minor = MINOR(disk_devt(md->disk));
Jun'ichi Nomura63d94e42006-02-24 13:04:25 -08002025
Mikulas Patocka32a926d2009-06-22 10:12:17 +01002026 unlock_fs(md);
2027 bdput(md->bdev);
Milan Broz304f3f62008-02-08 02:11:17 +00002028 destroy_workqueue(md->wq);
Kiyoshi Uedae6ee8c02009-06-22 10:12:36 +01002029 if (md->tio_pool)
2030 mempool_destroy(md->tio_pool);
2031 if (md->io_pool)
2032 mempool_destroy(md->io_pool);
2033 if (md->bs)
2034 bioset_free(md->bs);
Martin K. Petersen9c470082009-04-09 00:27:12 +01002035 blk_integrity_unregister(md->disk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002036 del_gendisk(md->disk);
Jun'ichi Nomura63d94e42006-02-24 13:04:25 -08002037 free_minor(minor);
Jeff Mahoneyfba9f902006-06-26 00:27:23 -07002038
2039 spin_lock(&_minor_lock);
2040 md->disk->private_data = NULL;
2041 spin_unlock(&_minor_lock);
2042
Linus Torvalds1da177e2005-04-16 15:20:36 -07002043 put_disk(md->disk);
Al Viro1312f402006-03-12 11:02:03 -05002044 blk_cleanup_queue(md->queue);
Jeff Mahoney10da4f72006-06-26 00:27:25 -07002045 module_put(THIS_MODULE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002046 kfree(md);
2047}
2048
Kiyoshi Uedae6ee8c02009-06-22 10:12:36 +01002049static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
2050{
2051 struct dm_md_mempools *p;
2052
2053 if (md->io_pool && md->tio_pool && md->bs)
2054 /* the md already has necessary mempools */
2055 goto out;
2056
2057 p = dm_table_get_md_mempools(t);
2058 BUG_ON(!p || md->io_pool || md->tio_pool || md->bs);
2059
2060 md->io_pool = p->io_pool;
2061 p->io_pool = NULL;
2062 md->tio_pool = p->tio_pool;
2063 p->tio_pool = NULL;
2064 md->bs = p->bs;
2065 p->bs = NULL;
2066
2067out:
2068 /* mempool bind completed, the table no longer needs any mempools */
2069 dm_table_free_md_mempools(t);
2070}
2071
Linus Torvalds1da177e2005-04-16 15:20:36 -07002072/*
2073 * Bind a table to the device.
2074 */
2075static void event_callback(void *context)
2076{
Mike Anderson7a8c3d32007-10-19 22:48:01 +01002077 unsigned long flags;
2078 LIST_HEAD(uevents);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002079 struct mapped_device *md = (struct mapped_device *) context;
2080
Mike Anderson7a8c3d32007-10-19 22:48:01 +01002081 spin_lock_irqsave(&md->uevent_lock, flags);
2082 list_splice_init(&md->uevent_list, &uevents);
2083 spin_unlock_irqrestore(&md->uevent_lock, flags);
2084
Tejun Heoed9e1982008-08-25 19:56:05 +09002085 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
Mike Anderson7a8c3d32007-10-19 22:48:01 +01002086
Linus Torvalds1da177e2005-04-16 15:20:36 -07002087 atomic_inc(&md->event_nr);
2088 wake_up(&md->eventq);
2089}
2090
Alasdair G Kergon4e90188be2005-07-28 21:15:59 -07002091static void __set_size(struct mapped_device *md, sector_t size)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002092{
Alasdair G Kergon4e90188be2005-07-28 21:15:59 -07002093 set_capacity(md->disk, size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002094
Mikulas Patockadb8fef42009-06-22 10:12:15 +01002095 mutex_lock(&md->bdev->bd_inode->i_mutex);
2096 i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
2097 mutex_unlock(&md->bdev->bd_inode->i_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002098}
2099
Alasdair G Kergon042d2a92009-12-10 23:52:24 +00002100/*
2101 * Returns old map, which caller must destroy.
2102 */
2103static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
2104 struct queue_limits *limits)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002105{
Alasdair G Kergon042d2a92009-12-10 23:52:24 +00002106 struct dm_table *old_map;
Jens Axboe165125e2007-07-24 09:28:11 +02002107 struct request_queue *q = md->queue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002108 sector_t size;
Kiyoshi Ueda523d9292009-06-22 10:12:37 +01002109 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002110
2111 size = dm_table_get_size(t);
Darrick J. Wong3ac51e72006-03-27 01:17:54 -08002112
2113 /*
2114 * Wipe any geometry if the size of the table changed.
2115 */
2116 if (size != get_capacity(md->disk))
2117 memset(&md->geometry, 0, sizeof(md->geometry));
2118
Mikulas Patocka32a926d2009-06-22 10:12:17 +01002119 __set_size(md, size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002120
Alasdair G Kergoncf222b32005-07-28 21:15:57 -07002121 dm_table_event_callback(t, event_callback, md);
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002122
Kiyoshi Uedae6ee8c02009-06-22 10:12:36 +01002123 /*
2124 * If the old table type wasn't request-based, the queue hasn't been
2125 * stopped during the suspension, so stop it now to prevent any I/O
2126 * from being mapped before the resume.
2127 * This must be done before setting the queue restrictions, because
2128 * request-based dm may start running as soon as they are set.
2129 */
2130 if (dm_table_request_based(t) && !blk_queue_stopped(q))
2131 stop_queue(q);
2132
2133 __bind_mempools(md, t);
2134
Kiyoshi Ueda523d9292009-06-22 10:12:37 +01002135 write_lock_irqsave(&md->map_lock, flags);
Alasdair G Kergon042d2a92009-12-10 23:52:24 +00002136 old_map = md->map;
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002137 md->map = t;
Mike Snitzer754c5fc2009-06-22 10:12:34 +01002138 dm_table_set_restrictions(t, q, limits);
Kiyoshi Ueda523d9292009-06-22 10:12:37 +01002139 write_unlock_irqrestore(&md->map_lock, flags);
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002140
Alasdair G Kergon042d2a92009-12-10 23:52:24 +00002141 return old_map;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002142}
2143
Alasdair G Kergona7940152009-12-10 23:52:23 +00002144/*
2145 * Returns unbound table for the caller to free.
2146 */
2147static struct dm_table *__unbind(struct mapped_device *md)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002148{
2149 struct dm_table *map = md->map;
Kiyoshi Ueda523d9292009-06-22 10:12:37 +01002150 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002151
2152 if (!map)
Alasdair G Kergona7940152009-12-10 23:52:23 +00002153 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002154
2155 dm_table_event_callback(map, NULL, NULL);
Kiyoshi Ueda523d9292009-06-22 10:12:37 +01002156 write_lock_irqsave(&md->map_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002157 md->map = NULL;
Kiyoshi Ueda523d9292009-06-22 10:12:37 +01002158 write_unlock_irqrestore(&md->map_lock, flags);
Alasdair G Kergona7940152009-12-10 23:52:23 +00002159
2160 return map;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002161}
2162
2163/*
2164 * Constructor for a new device.
2165 */
Alasdair G Kergon2b06cff2006-06-26 00:27:32 -07002166int dm_create(int minor, struct mapped_device **result)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002167{
2168 struct mapped_device *md;
2169
Alasdair G Kergon2b06cff2006-06-26 00:27:32 -07002170 md = alloc_dev(minor);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002171 if (!md)
2172 return -ENXIO;
2173
Milan Broz784aae72009-01-06 03:05:12 +00002174 dm_sysfs_init(md);
2175
Linus Torvalds1da177e2005-04-16 15:20:36 -07002176 *result = md;
2177 return 0;
2178}
2179
Mike Snitzera5664da2010-08-12 04:14:01 +01002180/*
2181 * Functions to manage md->type.
2182 * All are required to hold md->type_lock.
2183 */
2184void dm_lock_md_type(struct mapped_device *md)
2185{
2186 mutex_lock(&md->type_lock);
2187}
2188
2189void dm_unlock_md_type(struct mapped_device *md)
2190{
2191 mutex_unlock(&md->type_lock);
2192}
2193
2194void dm_set_md_type(struct mapped_device *md, unsigned type)
2195{
2196 md->type = type;
2197}
2198
2199unsigned dm_get_md_type(struct mapped_device *md)
2200{
2201 return md->type;
2202}
2203
Mike Snitzer4a0b4dd2010-08-12 04:14:02 +01002204/*
2205 * Fully initialize a request-based queue (->elevator, ->request_fn, etc).
2206 */
2207static int dm_init_request_based_queue(struct mapped_device *md)
2208{
2209 struct request_queue *q = NULL;
2210
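	/*
	 * An elevator is only attached once blk_init_allocated_queue() has
	 * run, so its presence presumably means the queue was already fully
	 * initialized.
	 */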
2211 if (md->queue->elevator)
2212 return 1;
2213
2214 /* Fully initialize the queue */
2215 q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL);
2216 if (!q)
2217 return 0;
2218
2219 md->queue = q;
2220 md->saved_make_request_fn = md->queue->make_request_fn;
2221 dm_init_md_queue(md);
2222 blk_queue_softirq_done(md->queue, dm_softirq_done);
2223 blk_queue_prep_rq(md->queue, dm_prep_fn);
2224 blk_queue_lld_busy(md->queue, dm_lld_busy);
2225 blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN_FLUSH);
2226
2227 elv_register_queue(md->queue);
2228
2229 return 1;
2230}
2231
2232/*
2233 * Setup the DM device's queue based on md's type
2234 */
2235int dm_setup_md_queue(struct mapped_device *md)
2236{
2237 if ((dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) &&
2238 !dm_init_request_based_queue(md)) {
2239 DMWARN("Cannot initialize queue for request-based mapped device");
2240 return -EINVAL;
2241 }
2242
2243 return 0;
2244}
2245
David Teigland637842c2006-01-06 00:20:00 -08002246static struct mapped_device *dm_find_md(dev_t dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002247{
2248 struct mapped_device *md;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002249 unsigned minor = MINOR(dev);
2250
2251 if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
2252 return NULL;
2253
Jeff Mahoneyf32c10b2006-06-26 00:27:22 -07002254 spin_lock(&_minor_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002255
2256 md = idr_find(&_minor_idr, minor);
Jeff Mahoneyfba9f902006-06-26 00:27:23 -07002257 if (md && (md == MINOR_ALLOCED ||
Tejun Heof331c022008-09-03 09:01:48 +02002258 (MINOR(disk_devt(dm_disk(md))) != minor) ||
Kiyoshi Uedaabdc5682010-08-12 04:13:54 +01002259 dm_deleting_md(md) ||
Alasdair G Kergon17b2f662006-06-26 00:27:33 -07002260 test_bit(DMF_FREEING, &md->flags))) {
David Teigland637842c2006-01-06 00:20:00 -08002261 md = NULL;
Jeff Mahoneyfba9f902006-06-26 00:27:23 -07002262 goto out;
2263 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002264
Jeff Mahoneyfba9f902006-06-26 00:27:23 -07002265out:
Jeff Mahoneyf32c10b2006-06-26 00:27:22 -07002266 spin_unlock(&_minor_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002267
David Teigland637842c2006-01-06 00:20:00 -08002268 return md;
2269}
2270
David Teiglandd229a952006-01-06 00:20:01 -08002271struct mapped_device *dm_get_md(dev_t dev)
2272{
2273 struct mapped_device *md = dm_find_md(dev);
2274
2275 if (md)
2276 dm_get(md);
2277
2278 return md;
2279}
2280
Alasdair G Kergon9ade92a2006-03-27 01:17:53 -08002281void *dm_get_mdptr(struct mapped_device *md)
David Teigland637842c2006-01-06 00:20:00 -08002282{
Alasdair G Kergon9ade92a2006-03-27 01:17:53 -08002283 return md->interface_ptr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002284}
2285
2286void dm_set_mdptr(struct mapped_device *md, void *ptr)
2287{
2288 md->interface_ptr = ptr;
2289}
2290
2291void dm_get(struct mapped_device *md)
2292{
2293 atomic_inc(&md->holders);
Kiyoshi Ueda3f77316d2010-08-12 04:13:56 +01002294 BUG_ON(test_bit(DMF_FREEING, &md->flags));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002295}
2296
Alasdair G Kergon72d94862006-06-26 00:27:35 -07002297const char *dm_device_name(struct mapped_device *md)
2298{
2299 return md->name;
2300}
2301EXPORT_SYMBOL_GPL(dm_device_name);
2302
Kiyoshi Ueda3f77316d2010-08-12 04:13:56 +01002303static void __dm_destroy(struct mapped_device *md, bool wait)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002304{
Mike Anderson1134e5a2006-03-27 01:17:54 -08002305 struct dm_table *map;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002306
Kiyoshi Ueda3f77316d2010-08-12 04:13:56 +01002307 might_sleep();
Jeff Mahoneyfba9f902006-06-26 00:27:23 -07002308
Kiyoshi Ueda3f77316d2010-08-12 04:13:56 +01002309 spin_lock(&_minor_lock);
2310 map = dm_get_live_table(md);
2311 idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
2312 set_bit(DMF_FREEING, &md->flags);
2313 spin_unlock(&_minor_lock);
2314
2315 if (!dm_suspended_md(md)) {
2316 dm_table_presuspend_targets(map);
2317 dm_table_postsuspend_targets(map);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002318 }
Kiyoshi Ueda3f77316d2010-08-12 04:13:56 +01002319
2320 /*
2321 * Rare, but there may still be I/O requests completing at this
2322 * point, for example.  Wait for all references to disappear.
2323 * No one should increment the reference count of the mapped_device
2324 * after its state becomes DMF_FREEING.
2325 */
2326 if (wait)
2327 while (atomic_read(&md->holders))
2328 msleep(1);
2329 else if (atomic_read(&md->holders))
2330 DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
2331 dm_device_name(md), atomic_read(&md->holders));
2332
2333 dm_sysfs_exit(md);
2334 dm_table_put(map);
2335 dm_table_destroy(__unbind(md));
2336 free_dev(md);
2337}
2338
2339void dm_destroy(struct mapped_device *md)
2340{
2341 __dm_destroy(md, true);
2342}
2343
2344void dm_destroy_immediate(struct mapped_device *md)
2345{
2346 __dm_destroy(md, false);
2347}
2348
2349void dm_put(struct mapped_device *md)
2350{
2351 atomic_dec(&md->holders);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002352}
Edward Goggin79eb8852007-05-09 02:32:56 -07002353EXPORT_SYMBOL_GPL(dm_put);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002354
Mikulas Patocka401600d2009-04-02 19:55:38 +01002355static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
Milan Broz46125c12008-02-08 02:10:30 +00002356{
2357 int r = 0;
Mikulas Patockab44ebeb2009-04-02 19:55:39 +01002358 DECLARE_WAITQUEUE(wait, current);
2359
2360 dm_unplug_all(md->queue);
2361
2362 add_wait_queue(&md->wait, &wait);
Milan Broz46125c12008-02-08 02:10:30 +00002363
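	/*
	 * Sleep in io_schedule() until md_in_flight() reports no outstanding
	 * I/O; in TASK_INTERRUPTIBLE mode a pending signal aborts the wait
	 * so that a suspend can be interrupted.
	 */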
2364 while (1) {
Mikulas Patocka401600d2009-04-02 19:55:38 +01002365 set_current_state(interruptible);
Milan Broz46125c12008-02-08 02:10:30 +00002366
2367 smp_mb();
Kiyoshi Uedab4324fe2009-12-10 23:52:16 +00002368 if (!md_in_flight(md))
Milan Broz46125c12008-02-08 02:10:30 +00002369 break;
2370
Mikulas Patocka401600d2009-04-02 19:55:38 +01002371 if (interruptible == TASK_INTERRUPTIBLE &&
2372 signal_pending(current)) {
Milan Broz46125c12008-02-08 02:10:30 +00002373 r = -EINTR;
2374 break;
2375 }
2376
2377 io_schedule();
2378 }
2379 set_current_state(TASK_RUNNING);
2380
Mikulas Patockab44ebeb2009-04-02 19:55:39 +01002381 remove_wait_queue(&md->wait, &wait);
2382
Milan Broz46125c12008-02-08 02:10:30 +00002383 return r;
2384}
2385
Mikulas Patocka531fe962009-06-22 10:12:17 +01002386static void dm_flush(struct mapped_device *md)
Mikulas Patockaaf7e4662009-04-09 00:27:16 +01002387{
2388 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
Mikulas Patocka52b1fd52009-06-22 10:12:21 +01002389
2390 bio_init(&md->barrier_bio);
2391 md->barrier_bio.bi_bdev = md->bdev;
2392 md->barrier_bio.bi_rw = WRITE_BARRIER;
2393 __split_and_process_bio(md, &md->barrier_bio);
2394
2395 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
Mikulas Patockaaf7e4662009-04-09 00:27:16 +01002396}
2397
2398static void process_barrier(struct mapped_device *md, struct bio *bio)
2399{
Mikulas Patocka5aa27812009-06-22 10:12:18 +01002400 md->barrier_error = 0;
2401
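	/*
	 * A barrier is handled as flush, payload (if any), flush: everything
	 * queued so far is drained and flushed before the data, and flushed
	 * again afterwards unless an earlier step reported -EOPNOTSUPP.
	 */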
Mikulas Patocka531fe962009-06-22 10:12:17 +01002402 dm_flush(md);
Mikulas Patockaaf7e4662009-04-09 00:27:16 +01002403
Mikulas Patocka5aa27812009-06-22 10:12:18 +01002404 if (!bio_empty_barrier(bio)) {
2405 __split_and_process_bio(md, bio);
Mikulas Patocka708e9292010-08-12 04:14:00 +01002406 /*
2407 * If the request isn't supported, don't waste time with
2408 * the second flush.
2409 */
2410 if (md->barrier_error != -EOPNOTSUPP)
2411 dm_flush(md);
Mikulas Patockaaf7e4662009-04-09 00:27:16 +01002412 }
2413
Mikulas Patockaaf7e4662009-04-09 00:27:16 +01002414 if (md->barrier_error != DM_ENDIO_REQUEUE)
Mikulas Patocka531fe962009-06-22 10:12:17 +01002415 bio_endio(bio, md->barrier_error);
Mikulas Patocka2761e952009-06-22 10:12:18 +01002416 else {
2417 spin_lock_irq(&md->deferred_lock);
2418 bio_list_add_head(&md->deferred, bio);
2419 spin_unlock_irq(&md->deferred_lock);
2420 }
Mikulas Patockaaf7e4662009-04-09 00:27:16 +01002421}
2422
Linus Torvalds1da177e2005-04-16 15:20:36 -07002423/*
2424 * Process the deferred bios
2425 */
Mikulas Patockaef208582009-04-02 19:55:38 +01002426static void dm_wq_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002427{
Mikulas Patockaef208582009-04-02 19:55:38 +01002428 struct mapped_device *md = container_of(work, struct mapped_device,
2429 work);
Milan Broz6d6f10d2008-02-08 02:10:22 +00002430 struct bio *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002431
Mikulas Patockaef208582009-04-02 19:55:38 +01002432 down_write(&md->io_lock);
2433
Mikulas Patocka3b00b202009-04-09 00:27:15 +01002434 while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
Alasdair G Kergondf12ee92009-04-09 00:27:13 +01002435 spin_lock_irq(&md->deferred_lock);
2436 c = bio_list_pop(&md->deferred);
2437 spin_unlock_irq(&md->deferred_lock);
Mikulas Patocka022c2612009-04-02 19:55:39 +01002438
Alasdair G Kergondf12ee92009-04-09 00:27:13 +01002439 if (!c) {
Alasdair G Kergon1eb787e2009-04-09 00:27:14 +01002440 clear_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags);
Alasdair G Kergondf12ee92009-04-09 00:27:13 +01002441 break;
2442 }
2443
Mikulas Patocka3b00b202009-04-09 00:27:15 +01002444 up_write(&md->io_lock);
2445
Kiyoshi Uedae6ee8c02009-06-22 10:12:36 +01002446 if (dm_request_based(md))
2447 generic_make_request(c);
2448 else {
Christoph Hellwig7b6d91d2010-08-07 18:20:39 +02002449 if (c->bi_rw & REQ_HARDBARRIER)
Kiyoshi Uedae6ee8c02009-06-22 10:12:36 +01002450 process_barrier(md, c);
2451 else
2452 __split_and_process_bio(md, c);
2453 }
Mikulas Patocka3b00b202009-04-09 00:27:15 +01002454
2455 down_write(&md->io_lock);
Mikulas Patocka022c2612009-04-02 19:55:39 +01002456 }
Milan Broz73d410c2008-02-08 02:10:25 +00002457
Mikulas Patockaef208582009-04-02 19:55:38 +01002458 up_write(&md->io_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002459}
2460
Mikulas Patocka9a1fb462009-04-02 19:55:36 +01002461static void dm_queue_flush(struct mapped_device *md)
Milan Broz304f3f62008-02-08 02:11:17 +00002462{
Mikulas Patocka3b00b202009-04-09 00:27:15 +01002463 clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2464 smp_mb__after_clear_bit();
Mikulas Patocka53d59142009-04-02 19:55:37 +01002465 queue_work(md->wq, &md->work);
Milan Broz304f3f62008-02-08 02:11:17 +00002466}
2467
Mike Snitzer57cba5d2010-08-12 04:14:04 +01002468static void dm_rq_set_target_request_nr(struct request *clone, unsigned request_nr)
Kiyoshi Uedad0bcb872009-12-10 23:52:18 +00002469{
2470 struct dm_rq_target_io *tio = clone->end_io_data;
2471
Mike Snitzer57cba5d2010-08-12 04:14:04 +01002472 tio->info.target_request_nr = request_nr;
Kiyoshi Uedad0bcb872009-12-10 23:52:18 +00002473}
2474
2475/* Issue barrier requests to targets and wait for their completion. */
2476static int dm_rq_barrier(struct mapped_device *md)
2477{
2478 int i, j;
Alasdair G Kergon7c666412009-12-10 23:52:19 +00002479 struct dm_table *map = dm_get_live_table(md);
Kiyoshi Uedad0bcb872009-12-10 23:52:18 +00002480 unsigned num_targets = dm_table_get_num_targets(map);
2481 struct dm_target *ti;
2482 struct request *clone;
2483
2484 md->barrier_error = 0;
2485
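	/*
	 * Send a clone of the original flush request to every target
	 * (num_flush_requests copies per target), then wait for all of them
	 * to complete before reporting the overall result.
	 */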
2486 for (i = 0; i < num_targets; i++) {
2487 ti = dm_table_get_target(map, i);
2488 for (j = 0; j < ti->num_flush_requests; j++) {
2489 clone = clone_rq(md->flush_request, md, GFP_NOIO);
Mike Snitzer57cba5d2010-08-12 04:14:04 +01002490 dm_rq_set_target_request_nr(clone, j);
Kiyoshi Uedad0bcb872009-12-10 23:52:18 +00002491 atomic_inc(&md->pending[rq_data_dir(clone)]);
2492 map_request(ti, clone, md);
2493 }
2494 }
2495
2496 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
2497 dm_table_put(map);
2498
2499 return md->barrier_error;
2500}
2501
2502static void dm_rq_barrier_work(struct work_struct *work)
2503{
2504 int error;
2505 struct mapped_device *md = container_of(work, struct mapped_device,
2506 barrier_work);
2507 struct request_queue *q = md->queue;
2508 struct request *rq;
2509 unsigned long flags;
2510
2511 /*
2512 * Hold the md reference here and release it only at the end, so that
2513 * the md can't be deleted by the device opener while the barrier
2514 * request is completing.
2515 */
2516 dm_get(md);
2517
2518 error = dm_rq_barrier(md);
2519
2520 rq = md->flush_request;
2521 md->flush_request = NULL;
2522
2523 if (error == DM_ENDIO_REQUEUE) {
2524 spin_lock_irqsave(q->queue_lock, flags);
2525 blk_requeue_request(q, rq);
2526 spin_unlock_irqrestore(q->queue_lock, flags);
2527 } else
2528 blk_end_request_all(rq, error);
2529
2530 blk_run_queue(q);
2531
2532 dm_put(md);
2533}
2534
Linus Torvalds1da177e2005-04-16 15:20:36 -07002535/*
Alasdair G Kergon042d2a92009-12-10 23:52:24 +00002536 * Swap in a new table, returning the old one for the caller to destroy.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002537 */
Alasdair G Kergon042d2a92009-12-10 23:52:24 +00002538struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002539{
Alasdair G Kergon042d2a92009-12-10 23:52:24 +00002540 struct dm_table *map = ERR_PTR(-EINVAL);
Mike Snitzer754c5fc2009-06-22 10:12:34 +01002541 struct queue_limits limits;
Alasdair G Kergon042d2a92009-12-10 23:52:24 +00002542 int r;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002543
Daniel Walkere61290a2008-02-08 02:10:08 +00002544 mutex_lock(&md->suspend_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002545
2546 /* device must be suspended */
Kiyoshi Ueda4f186f82009-12-10 23:52:26 +00002547 if (!dm_suspended_md(md))
Alasdair G Kergon93c534a2005-07-12 15:53:05 -07002548 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002549
Mike Snitzer754c5fc2009-06-22 10:12:34 +01002550 r = dm_calculate_queue_limits(table, &limits);
Alasdair G Kergon042d2a92009-12-10 23:52:24 +00002551 if (r) {
2552 map = ERR_PTR(r);
Mike Snitzer754c5fc2009-06-22 10:12:34 +01002553 goto out;
Alasdair G Kergon042d2a92009-12-10 23:52:24 +00002554 }
Mike Snitzer754c5fc2009-06-22 10:12:34 +01002555
Alasdair G Kergon042d2a92009-12-10 23:52:24 +00002556 map = __bind(md, table, &limits);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002557
Alasdair G Kergon93c534a2005-07-12 15:53:05 -07002558out:
Daniel Walkere61290a2008-02-08 02:10:08 +00002559 mutex_unlock(&md->suspend_lock);
Alasdair G Kergon042d2a92009-12-10 23:52:24 +00002560 return map;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002561}
2562
2563/*
2564 * Functions to lock and unlock any filesystem running on the
2565 * device.
2566 */
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002567static int lock_fs(struct mapped_device *md)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002568{
Alasdair G Kergone39e2e92006-01-06 00:20:05 -08002569 int r;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002570
2571 WARN_ON(md->frozen_sb);
Alasdair G Kergondfbe03f2005-05-05 16:16:04 -07002572
Mikulas Patockadb8fef42009-06-22 10:12:15 +01002573 md->frozen_sb = freeze_bdev(md->bdev);
Alasdair G Kergondfbe03f2005-05-05 16:16:04 -07002574 if (IS_ERR(md->frozen_sb)) {
Alasdair G Kergoncf222b32005-07-28 21:15:57 -07002575 r = PTR_ERR(md->frozen_sb);
Alasdair G Kergone39e2e92006-01-06 00:20:05 -08002576 md->frozen_sb = NULL;
2577 return r;
Alasdair G Kergondfbe03f2005-05-05 16:16:04 -07002578 }
2579
Alasdair G Kergonaa8d7c22006-01-06 00:20:06 -08002580 set_bit(DMF_FROZEN, &md->flags);
2581
Linus Torvalds1da177e2005-04-16 15:20:36 -07002582 return 0;
2583}
2584
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002585static void unlock_fs(struct mapped_device *md)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002586{
Alasdair G Kergonaa8d7c22006-01-06 00:20:06 -08002587 if (!test_bit(DMF_FROZEN, &md->flags))
2588 return;
2589
Mikulas Patockadb8fef42009-06-22 10:12:15 +01002590 thaw_bdev(md->bdev, md->frozen_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002591 md->frozen_sb = NULL;
Alasdair G Kergonaa8d7c22006-01-06 00:20:06 -08002592 clear_bit(DMF_FROZEN, &md->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002593}
2594
2595/*
2596 * We need to be able to change a mapping table under a mounted
2597 * filesystem.  For example, we might want to move some data in
2598 * the background.  Before the table can be swapped with
2599 * dm_bind_table, dm_suspend must be called to flush any in-flight
2600 * bios and ensure that any further I/O gets deferred.
2601 */
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01002602/*
2603 * Suspend mechanism in request-based dm.
2604 *
Kiyoshi Ueda9f518b22009-12-10 23:52:16 +00002605 * 1. Flush all I/Os by lock_fs() if needed.
2606 * 2. Stop dispatching any I/O by stopping the request_queue.
2607 * 3. Wait for all in-flight I/Os to be completed or requeued.
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01002608 *
Kiyoshi Ueda9f518b22009-12-10 23:52:16 +00002609 * To abort suspend, start the request_queue.
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01002610 */
Kiyoshi Uedaa3d77d32006-12-08 02:41:04 -08002611int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002612{
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002613 struct dm_table *map = NULL;
Milan Broz46125c12008-02-08 02:10:30 +00002614 int r = 0;
Kiyoshi Uedaa3d77d32006-12-08 02:41:04 -08002615 int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
Kiyoshi Ueda2e93ccc2006-12-08 02:41:09 -08002616 int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002617
Daniel Walkere61290a2008-02-08 02:10:08 +00002618 mutex_lock(&md->suspend_lock);
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002619
Kiyoshi Ueda4f186f82009-12-10 23:52:26 +00002620 if (dm_suspended_md(md)) {
Milan Broz73d410c2008-02-08 02:10:25 +00002621 r = -EINVAL;
Alasdair G Kergond2874832006-11-08 17:44:43 -08002622 goto out_unlock;
Milan Broz73d410c2008-02-08 02:10:25 +00002623 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002624
Alasdair G Kergon7c666412009-12-10 23:52:19 +00002625 map = dm_get_live_table(md);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002626
Kiyoshi Ueda2e93ccc2006-12-08 02:41:09 -08002627 /*
2628 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
2629 * This flag is cleared before dm_suspend returns.
2630 */
2631 if (noflush)
2632 set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
2633
Alasdair G Kergoncf222b32005-07-28 21:15:57 -07002634 /* This does not get reverted if there's an error later. */
2635 dm_table_presuspend_targets(map);
2636
Mikulas Patocka32a926d2009-06-22 10:12:17 +01002637 /*
Kiyoshi Ueda9f518b22009-12-10 23:52:16 +00002638 * Flush I/O to the device.
2639 * Any I/O submitted after lock_fs() may not be flushed.
2640 * noflush takes precedence over do_lockfs.
2641 * (lock_fs() flushes I/Os and waits for them to complete.)
Mikulas Patocka32a926d2009-06-22 10:12:17 +01002642 */
2643 if (!noflush && do_lockfs) {
2644 r = lock_fs(md);
2645 if (r)
Kiyoshi Uedaf431d962008-10-21 17:45:07 +01002646 goto out;
Alasdair G Kergonaa8d7c22006-01-06 00:20:06 -08002647 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002648
2649 /*
Mikulas Patocka3b00b202009-04-09 00:27:15 +01002650 * Here we must make sure that no processes are submitting requests
2651 * to target drivers i.e. no one may be executing
2652 * __split_and_process_bio. This is called from dm_request and
2653 * dm_wq_work.
2654 *
2655 * To get all processes out of __split_and_process_bio in dm_request,
2656 * we take the write lock. To prevent any process from reentering
2657 * __split_and_process_bio from dm_request, we set
2658 * DMF_QUEUE_IO_TO_THREAD.
2659 *
2660 * To quiesce the thread (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND
2661 * and call flush_workqueue(md->wq). flush_workqueue will wait until
2662 * dm_wq_work exits and DMF_BLOCK_IO_FOR_SUSPEND will prevent any
2663 * further calls to __split_and_process_bio from dm_wq_work.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002664 */
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002665 down_write(&md->io_lock);
Alasdair G Kergon1eb787e2009-04-09 00:27:14 +01002666 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2667 set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags);
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002668 up_write(&md->io_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002669
Kiyoshi Uedad0bcb872009-12-10 23:52:18 +00002670 /*
2671 * Request-based dm uses md->wq for barrier (dm_rq_barrier_work) which
2672 * can be kicked until md->queue is stopped. So stop md->queue before
2673 * flushing md->wq.
2674 */
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01002675 if (dm_request_based(md))
Kiyoshi Ueda9f518b22009-12-10 23:52:16 +00002676 stop_queue(md->queue);
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01002677
Kiyoshi Uedad0bcb872009-12-10 23:52:18 +00002678 flush_workqueue(md->wq);
2679
Linus Torvalds1da177e2005-04-16 15:20:36 -07002680 /*
Mikulas Patocka3b00b202009-04-09 00:27:15 +01002681 * At this point no more requests are entering target request routines.
2682 * We call dm_wait_for_completion to wait for all existing requests
2683 * to finish.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002684 */
Mikulas Patocka401600d2009-04-02 19:55:38 +01002685 r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002686
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002687 down_write(&md->io_lock);
Milan Broz6d6f10d2008-02-08 02:10:22 +00002688 if (noflush)
Mikulas Patocka022c2612009-04-02 19:55:39 +01002689 clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
Milan Broz94d63512008-02-08 02:10:27 +00002690 up_write(&md->io_lock);
Kiyoshi Ueda2e93ccc2006-12-08 02:41:09 -08002691
Linus Torvalds1da177e2005-04-16 15:20:36 -07002692 /* were we interrupted ? */
Milan Broz46125c12008-02-08 02:10:30 +00002693 if (r < 0) {
Mikulas Patocka9a1fb462009-04-02 19:55:36 +01002694 dm_queue_flush(md);
Milan Broz73d410c2008-02-08 02:10:25 +00002695
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01002696 if (dm_request_based(md))
Kiyoshi Ueda9f518b22009-12-10 23:52:16 +00002697 start_queue(md->queue);
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01002698
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002699 unlock_fs(md);
Kiyoshi Ueda2e93ccc2006-12-08 02:41:09 -08002700 goto out; /* pushback list is already flushed, so skip flush */
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002701 }
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002702
Mikulas Patocka3b00b202009-04-09 00:27:15 +01002703 /*
2704 * If dm_wait_for_completion returned 0, the device is completely
2705 * quiescent now. There is no request-processing activity. All new
2706 * requests are being added to md->deferred list.
2707 */
2708
Linus Torvalds1da177e2005-04-16 15:20:36 -07002709 set_bit(DMF_SUSPENDED, &md->flags);
2710
Kiyoshi Ueda4d4471c2009-12-10 23:52:26 +00002711 dm_table_postsuspend_targets(map);
2712
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002713out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002714 dm_table_put(map);
Alasdair G Kergond2874832006-11-08 17:44:43 -08002715
2716out_unlock:
Daniel Walkere61290a2008-02-08 02:10:08 +00002717 mutex_unlock(&md->suspend_lock);
Alasdair G Kergoncf222b32005-07-28 21:15:57 -07002718 return r;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002719}
2720
2721int dm_resume(struct mapped_device *md)
2722{
Alasdair G Kergoncf222b32005-07-28 21:15:57 -07002723 int r = -EINVAL;
Alasdair G Kergoncf222b32005-07-28 21:15:57 -07002724 struct dm_table *map = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002725
Daniel Walkere61290a2008-02-08 02:10:08 +00002726 mutex_lock(&md->suspend_lock);
Kiyoshi Ueda4f186f82009-12-10 23:52:26 +00002727 if (!dm_suspended_md(md))
Alasdair G Kergoncf222b32005-07-28 21:15:57 -07002728 goto out;
Alasdair G Kergoncf222b32005-07-28 21:15:57 -07002729
Alasdair G Kergon7c666412009-12-10 23:52:19 +00002730 map = dm_get_live_table(md);
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002731 if (!map || !dm_table_get_size(map))
Alasdair G Kergoncf222b32005-07-28 21:15:57 -07002732 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002733
Milan Broz8757b772006-10-03 01:15:36 -07002734 r = dm_table_resume_targets(map);
2735 if (r)
2736 goto out;
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002737
Mikulas Patocka9a1fb462009-04-02 19:55:36 +01002738 dm_queue_flush(md);
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002739
Kiyoshi Uedacec47e32009-06-22 10:12:35 +01002740 /*
2741 * Flushing deferred I/Os must be done after the targets are resumed
2742 * so that they can map those I/Os correctly.
2743 * Request-based dm queues the deferred I/Os in its request_queue.
2744 */
2745 if (dm_request_based(md))
2746 start_queue(md->queue);
2747
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002748 unlock_fs(md);
2749
2750 clear_bit(DMF_SUSPENDED, &md->flags);
2751
Linus Torvalds1da177e2005-04-16 15:20:36 -07002752 dm_table_unplug_all(map);
Alasdair G Kergoncf222b32005-07-28 21:15:57 -07002753 r = 0;
2754out:
2755 dm_table_put(map);
Daniel Walkere61290a2008-02-08 02:10:08 +00002756 mutex_unlock(&md->suspend_lock);
Alasdair G Kergon2ca33102005-07-28 21:16:00 -07002757
Alasdair G Kergoncf222b32005-07-28 21:15:57 -07002758 return r;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002759}
2760
2761/*-----------------------------------------------------------------
2762 * Event notification.
2763 *---------------------------------------------------------------*/
Peter Rajnoha3abf85b2010-03-06 02:32:31 +00002764int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
Milan Broz60935eb2009-06-22 10:12:30 +01002765 unsigned cookie)
Alasdair G Kergon69267a32007-12-13 14:15:57 +00002766{
Milan Broz60935eb2009-06-22 10:12:30 +01002767 char udev_cookie[DM_COOKIE_LENGTH];
2768 char *envp[] = { udev_cookie, NULL };
2769
2770 if (!cookie)
Peter Rajnoha3abf85b2010-03-06 02:32:31 +00002771 return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
Milan Broz60935eb2009-06-22 10:12:30 +01002772 else {
2773 snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
2774 DM_COOKIE_ENV_VAR_NAME, cookie);
Peter Rajnoha3abf85b2010-03-06 02:32:31 +00002775 return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
2776 action, envp);
Milan Broz60935eb2009-06-22 10:12:30 +01002777 }
Alasdair G Kergon69267a32007-12-13 14:15:57 +00002778}
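
/*
 * Sketch for illustration only: emitting a CHANGE uevent with an optional
 * udev cookie, as the rename/resume paths do.  A cookie of 0 falls back to
 * a plain uevent without the DM_COOKIE variable.  The wrapper name is
 * hypothetical.
 */
static void example_notify_change(struct mapped_device *md, unsigned cookie)
{
	dm_kobject_uevent(md, KOBJ_CHANGE, cookie);
}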
2779
Mike Anderson7a8c3d32007-10-19 22:48:01 +01002780uint32_t dm_next_uevent_seq(struct mapped_device *md)
2781{
2782 return atomic_add_return(1, &md->uevent_seq);
2783}
2784
Linus Torvalds1da177e2005-04-16 15:20:36 -07002785uint32_t dm_get_event_nr(struct mapped_device *md)
2786{
2787 return atomic_read(&md->event_nr);
2788}
2789
2790int dm_wait_event(struct mapped_device *md, int event_nr)
2791{
2792 return wait_event_interruptible(md->eventq,
2793 (event_nr != atomic_read(&md->event_nr)));
2794}
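
/*
 * Sketch (not in the original source): waiting for the next table event.
 * A caller samples the current event counter and then sleeps until it
 * changes; dm_wait_event() returns non-zero if a signal interrupts the
 * wait.  The helper name is hypothetical.
 */
static int example_wait_for_next_event(struct mapped_device *md)
{
	int event_nr = dm_get_event_nr(md);

	return dm_wait_event(md, event_nr);
}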
2795
Mike Anderson7a8c3d32007-10-19 22:48:01 +01002796void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
2797{
2798 unsigned long flags;
2799
2800 spin_lock_irqsave(&md->uevent_lock, flags);
2801 list_add(elist, &md->uevent_list);
2802 spin_unlock_irqrestore(&md->uevent_lock, flags);
2803}
2804
Linus Torvalds1da177e2005-04-16 15:20:36 -07002805/*
2806 * The gendisk is only valid as long as you have a reference
2807 * count on 'md'.
2808 */
2809struct gendisk *dm_disk(struct mapped_device *md)
2810{
2811 return md->disk;
2812}
2813
Milan Broz784aae72009-01-06 03:05:12 +00002814struct kobject *dm_kobject(struct mapped_device *md)
2815{
2816 return &md->kobj;
2817}
2818
2819/*
2820 * struct mapped_device should not be exported outside of dm.c
2821 * struct mapped_device should not be exported outside of dm.c,
2822 * so use this check to verify that kobj is part of the md structure.
2823struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
2824{
2825 struct mapped_device *md;
2826
2827 md = container_of(kobj, struct mapped_device, kobj);
2828 if (&md->kobj != kobj)
2829 return NULL;
2830
Milan Broz4d89b7b2009-06-22 10:12:11 +01002831 if (test_bit(DMF_FREEING, &md->flags) ||
Mike Anderson432a2122009-12-10 23:52:20 +00002832 dm_deleting_md(md))
Milan Broz4d89b7b2009-06-22 10:12:11 +01002833 return NULL;
2834
Milan Broz784aae72009-01-06 03:05:12 +00002835 dm_get(md);
2836 return md;
2837}
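
/*
 * Sketch (illustrative): how a sysfs attribute handler might map its
 * kobject back to the owning mapped_device, mirroring what dm-sysfs.c
 * does.  The show() callback is hypothetical and the buffer formatting
 * is elided; dm_put() is assumed to drop the reference taken above.
 */
static ssize_t example_attr_show(struct kobject *kobj, char *buf)
{
	struct mapped_device *md = dm_get_from_kobject(kobj);

	if (!md)
		return -EINVAL;

	/* ... format device state into buf here ... */

	dm_put(md);
	return 0;
}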
2838
Kiyoshi Ueda4f186f82009-12-10 23:52:26 +00002839int dm_suspended_md(struct mapped_device *md)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002840{
2841 return test_bit(DMF_SUSPENDED, &md->flags);
2842}
2843
Kiyoshi Ueda64dbce52009-12-10 23:52:27 +00002844int dm_suspended(struct dm_target *ti)
2845{
Kiyoshi Uedaecdb2e22010-03-06 02:29:52 +00002846 return dm_suspended_md(dm_table_get_md(ti->table));
Kiyoshi Ueda64dbce52009-12-10 23:52:27 +00002847}
2848EXPORT_SYMBOL_GPL(dm_suspended);
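
/*
 * Sketch (illustrative): a target worker using dm_suspended() to avoid
 * starting new work while its device is suspended.  The worker function
 * itself is hypothetical.
 */
static void example_target_worker(struct dm_target *ti)
{
	if (dm_suspended(ti))
		return;	/* the device is suspended; defer the work */

	/* ... issue or requeue target work here ... */
}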
2849
Kiyoshi Ueda2e93ccc2006-12-08 02:41:09 -08002850int dm_noflush_suspending(struct dm_target *ti)
2851{
Kiyoshi Uedaecdb2e22010-03-06 02:29:52 +00002852 return __noflush_suspending(dm_table_get_md(ti->table));
Kiyoshi Ueda2e93ccc2006-12-08 02:41:09 -08002853}
2854EXPORT_SYMBOL_GPL(dm_noflush_suspending);
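
/*
 * Sketch (illustrative): a target end_io handler requeueing I/O during a
 * noflush suspend instead of completing it with an error, the pattern
 * dm-multipath follows.  DM_ENDIO_REQUEUE is assumed to come from
 * device-mapper.h; the handler itself is hypothetical.
 */
static int example_target_end_io(struct dm_target *ti, int error)
{
	if (error && dm_noflush_suspending(ti))
		return DM_ENDIO_REQUEUE;	/* retry once the device resumes */

	return error;
}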
2855
Kiyoshi Uedae6ee8c02009-06-22 10:12:36 +01002856struct dm_md_mempools *dm_alloc_md_mempools(unsigned type)
2857{
2858 struct dm_md_mempools *pools = kmalloc(sizeof(*pools), GFP_KERNEL);
2859
2860 if (!pools)
2861 return NULL;
2862
2863 pools->io_pool = (type == DM_TYPE_BIO_BASED) ?
2864 mempool_create_slab_pool(MIN_IOS, _io_cache) :
2865 mempool_create_slab_pool(MIN_IOS, _rq_bio_info_cache);
2866 if (!pools->io_pool)
2867 goto free_pools_and_out;
2868
2869 pools->tio_pool = (type == DM_TYPE_BIO_BASED) ?
2870 mempool_create_slab_pool(MIN_IOS, _tio_cache) :
2871 mempool_create_slab_pool(MIN_IOS, _rq_tio_cache);
2872 if (!pools->tio_pool)
2873 goto free_io_pool_and_out;
2874
2875 pools->bs = (type == DM_TYPE_BIO_BASED) ?
2876 bioset_create(16, 0) : bioset_create(MIN_IOS, 0);
2877 if (!pools->bs)
2878 goto free_tio_pool_and_out;
2879
2880 return pools;
2881
2882free_tio_pool_and_out:
2883 mempool_destroy(pools->tio_pool);
2884
2885free_io_pool_and_out:
2886 mempool_destroy(pools->io_pool);
2887
2888free_pools_and_out:
2889 kfree(pools);
2890
2891 return NULL;
2892}
2893
2894void dm_free_md_mempools(struct dm_md_mempools *pools)
2895{
2896 if (!pools)
2897 return;
2898
2899 if (pools->io_pool)
2900 mempool_destroy(pools->io_pool);
2901
2902 if (pools->tio_pool)
2903 mempool_destroy(pools->tio_pool);
2904
2905 if (pools->bs)
2906 bioset_free(pools->bs);
2907
2908 kfree(pools);
2909}
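
/*
 * Sketch (not part of the original file): pairing the mempool allocator
 * with its destructor, as the table-load path does once it knows whether
 * the device is bio-based or request-based.  The wrapper and its error
 * handling are illustrative only.
 */
static struct dm_md_mempools *example_setup_pools(unsigned type)
{
	struct dm_md_mempools *pools = dm_alloc_md_mempools(type);

	if (!pools)
		return NULL;	/* one of the pools could not be created */

	/*
	 * Any later failure should release everything again with
	 * dm_free_md_mempools(pools).
	 */
	return pools;
}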
2910
Alexey Dobriyan83d5cde2009-09-21 17:01:13 -07002911static const struct block_device_operations dm_blk_dops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002912 .open = dm_blk_open,
2913 .release = dm_blk_close,
Milan Brozaa129a22006-10-03 01:15:15 -07002914 .ioctl = dm_blk_ioctl,
Darrick J. Wong3ac51e72006-03-27 01:17:54 -08002915 .getgeo = dm_blk_getgeo,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002916 .owner = THIS_MODULE
2917};
2918
2919EXPORT_SYMBOL(dm_get_mapinfo);
2920
2921/*
2922 * module hooks
2923 */
2924module_init(dm_init);
2925module_exit(dm_exit);
2926
2927module_param(major, uint, 0);
2928MODULE_PARM_DESC(major, "The major number of the device mapper");
2929MODULE_DESCRIPTION(DM_NAME " driver");
2930MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
2931MODULE_LICENSE("GPL");