/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/blktrace_api.h>
#include <trace/block.h>

#define DM_MSG_PREFIX "core"

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_SPINLOCK(_minor_lock);
/*
 * For bio-based dm.
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	int error;
	atomic_t io_count;
	struct bio *bio;
	unsigned long start_time;
};

/*
 * For bio-based dm.
 * One of these is allocated per target within a bio.  Hopefully
 * this will be simplified out one day.
 */
struct dm_target_io {
	struct dm_io *io;
	struct dm_target *ti;
	union map_info info;
};

DEFINE_TRACE(block_bio_complete);

/*
 * For request-based dm.
 * One of these is allocated per request.
 */
struct dm_rq_target_io {
	struct mapped_device *md;
	struct dm_target *ti;
	struct request *orig, clone;
	int error;
	union map_info info;
};

/*
 * For request-based dm.
 * One of these is allocated per bio.
 */
struct dm_rq_clone_bio_info {
	struct bio *orig;
	struct request *rq;
};

union map_info *dm_get_mapinfo(struct bio *bio)
{
	if (bio && bio->bi_private)
		return &((struct dm_target_io *)bio->bi_private)->info;
	return NULL;
}
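
/*
 * Illustrative sketch (not from the original file): dm_get_mapinfo() lets
 * a target driver reach its per-bio context.  A hypothetical target might
 * stash state in the map_info from its map function:
 *
 *	union map_info *info = dm_get_mapinfo(bio);
 *	if (info)
 *		info->ptr = my_state;	// hypothetical per-bio state
 */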

#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_QUEUE_IO_TO_THREAD 6
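
/*
 * These bits are manipulated with the atomic bit operations, e.g.
 * set_bit(DMF_SUSPENDED, &md->flags) and test_bit(DMF_FREEING, &md->flags)
 * below, so individual flag updates need no additional locking.
 */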

/*
 * Work processed by per-device workqueue.
 */
struct mapped_device {
	struct rw_semaphore io_lock;
	struct mutex suspend_lock;
	rwlock_t map_lock;
	atomic_t holders;
	atomic_t open_count;

	unsigned long flags;

	struct request_queue *queue;
	struct gendisk *disk;
	char name[16];

	void *interface_ptr;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	atomic_t pending;
	wait_queue_head_t wait;
	struct work_struct work;
	struct bio_list deferred;
	spinlock_t deferred_lock;

	/*
	 * An error from the barrier request currently being processed.
	 */
	int barrier_error;

	/*
	 * Processing queue (flush/barriers)
	 */
	struct workqueue_struct *wq;

	/*
	 * The current mapping.
	 */
	struct dm_table *map;

	/*
	 * io objects are allocated from here.
	 */
	mempool_t *io_pool;
	mempool_t *tio_pool;

	struct bio_set *bs;

	/*
	 * Event handling.
	 */
	atomic_t event_nr;
	wait_queue_head_t eventq;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/*
	 * freeze/thaw support requires holding onto a super block
	 */
	struct super_block *frozen_sb;
	struct block_device *suspended_bdev;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/* sysfs handle */
	struct kobject kobj;
};

#define MIN_IOS 256
static struct kmem_cache *_io_cache;
static struct kmem_cache *_tio_cache;
static struct kmem_cache *_rq_tio_cache;
static struct kmem_cache *_rq_bio_info_cache;

static int __init local_init(void)
{
	int r = -ENOMEM;

	/* allocate a slab for the dm_ios */
	_io_cache = KMEM_CACHE(dm_io, 0);
	if (!_io_cache)
		return r;

	/* allocate a slab for the target ios */
	_tio_cache = KMEM_CACHE(dm_target_io, 0);
	if (!_tio_cache)
		goto out_free_io_cache;

	_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
	if (!_rq_tio_cache)
		goto out_free_tio_cache;

	_rq_bio_info_cache = KMEM_CACHE(dm_rq_clone_bio_info, 0);
	if (!_rq_bio_info_cache)
		goto out_free_rq_tio_cache;

	r = dm_uevent_init();
	if (r)
		goto out_free_rq_bio_info_cache;

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_uevent_exit;

	if (!_major)
		_major = r;

	return 0;

out_uevent_exit:
	dm_uevent_exit();
out_free_rq_bio_info_cache:
	kmem_cache_destroy(_rq_bio_info_cache);
out_free_rq_tio_cache:
	kmem_cache_destroy(_rq_tio_cache);
out_free_tio_cache:
	kmem_cache_destroy(_tio_cache);
out_free_io_cache:
	kmem_cache_destroy(_io_cache);

	return r;
}

static void local_exit(void)
{
	kmem_cache_destroy(_rq_bio_info_cache);
	kmem_cache_destroy(_rq_tio_cache);
	kmem_cache_destroy(_tio_cache);
	kmem_cache_destroy(_io_cache);
	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_kcopyd_init,
	dm_interface_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();
}

/*
 * Block device functions
 */
static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    test_bit(DMF_DELETING, &md->flags)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);

out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}

static int dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md = disk->private_data;
	atomic_dec(&md->open_count);
	dm_put(md);
	return 0;
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md))
		r = -EBUSY;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}
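
/*
 * Sketch of how a removal path might use this (illustrative, not taken
 * from this file):
 *
 *	r = dm_lock_for_deletion(md);
 *	if (r)
 *		return r;	// device still open somewhere: -EBUSY
 *	// DMF_DELETING is now set, so new opens fail and removal is safe
 */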

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	struct dm_table *map = dm_get_table(md);
	struct dm_target *tgt;
	int r = -ENOTTY;

	if (!map || !dm_table_get_size(map))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		goto out;

	tgt = dm_table_get_target(map, 0);

	if (dm_suspended(md)) {
		r = -EAGAIN;
		goto out;
	}

	if (tgt->type->ioctl)
		r = tgt->type->ioctl(tgt, cmd, arg);

out:
	dm_table_put(map);

	return r;
}

static struct dm_io *alloc_io(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	mempool_free(io, md->io_pool);
}

static struct dm_target_io *alloc_tio(struct mapped_device *md)
{
	return mempool_alloc(md->tio_pool, GFP_NOIO);
}

static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
{
	mempool_free(tio, md->tio_pool);
}
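
/*
 * Note: these helpers allocate from per-device mempools with GFP_NOIO.
 * The allocation may sleep, but reclaim will not recurse back into the
 * I/O path, and the mempool is meant to guarantee forward progress even
 * under memory pressure as in-flight objects are returned to it.
 */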

static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	int cpu;

	io->start_time = jiffies;

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_unlock();
	dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);
}

static void end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	unsigned long duration = jiffies - io->start_time;
	int pending, cpu;
	int rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
	part_stat_unlock();

	/*
	 * After this is decremented the bio must not be touched if it is
	 * a barrier.
	 */
	dm_disk(md)->part0.in_flight = pending =
		atomic_dec_return(&md->pending);

	/* nudge anyone waiting on suspend queue */
	if (!pending)
		wake_up(&md->wait);
}

/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
	down_write(&md->io_lock);

	spin_lock_irq(&md->deferred_lock);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irq(&md->deferred_lock);

	if (!test_and_set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags))
		queue_work(md->wq, &md->work);

	up_write(&md->io_lock);
}
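
/*
 * Only the first deferred bio needs to schedule the work item: once
 * DMF_QUEUE_IO_TO_THREAD is set, later bios simply join md->deferred
 * without re-queueing the work, and dm_wq_work() drains the whole list.
 */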

/*
 * Everyone (including functions in this file) should use this
 * function to access the md->map field, and make sure they call
 * dm_table_put() when finished.
 */
struct dm_table *dm_get_table(struct mapped_device *md)
{
	struct dm_table *t;

	read_lock(&md->map_lock);
	t = md->map;
	if (t)
		dm_table_get(t);
	read_unlock(&md->map_lock);

	return t;
}
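
/*
 * For example (illustrative only), callers throughout this file pair the
 * reference like this:
 *
 *	struct dm_table *map = dm_get_table(md);
 *	if (map) {
 *		// ... use the table ...
 *		dm_table_put(map);
 *	}
 */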

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}
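
/*
 * Worked example (illustrative): for a geometry of 1024 cylinders,
 * 255 heads and 63 sectors, sz = 1024 * 255 * 63 = 16450560 sectors,
 * so any geo->start beyond that is rejected with -EINVAL.
 */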

/*-----------------------------------------------------------------
 * CRUD START:
 * A more elegant solution is in the works that uses the queue
 * merge fn, unfortunately there are a couple of changes to
 * the block layer that I want to make for this.  So in the
 * interests of getting something for people to use I give
 * you this clearly demarcated crap.
 *---------------------------------------------------------------*/

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static void dec_pending(struct dm_io *io, int error)
{
	unsigned long flags;
	int io_error;
	struct bio *bio;
	struct mapped_device *md = io->md;

	/* Push-back supersedes any I/O errors */
	if (error && !(io->error > 0 && __noflush_suspending(md)))
		io->error = error;

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->error == DM_ENDIO_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 */
			spin_lock_irqsave(&md->deferred_lock, flags);
			if (__noflush_suspending(md))
				bio_list_add_head(&md->deferred, io->bio);
			else
				/* noflush suspend was interrupted. */
				io->error = -EIO;
			spin_unlock_irqrestore(&md->deferred_lock, flags);
		}

		io_error = io->error;
		bio = io->bio;

		if (bio_barrier(bio)) {
			/*
			 * There can be just one barrier request so we use
			 * a per-device variable for error reporting.
			 * Note that you can't touch the bio after end_io_acct().
			 */
			md->barrier_error = io_error;
			end_io_acct(io);
		} else {
			end_io_acct(io);

			if (io_error != DM_ENDIO_REQUEUE) {
				trace_block_bio_complete(md->queue, bio);

				bio_endio(bio, io_error);
			}
		}

		free_io(md, io);
	}
}

static void clone_endio(struct bio *bio, int error)
{
	int r = 0;
	struct dm_target_io *tio = bio->bi_private;
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
		error = -EIO;

	if (endio) {
		r = endio(tio->ti, bio, error, &tio->info);
		if (r < 0 || r == DM_ENDIO_REQUEUE)
			/*
			 * error and requeue request are handled
			 * in dec_pending().
			 */
			error = r;
		else if (r == DM_ENDIO_INCOMPLETE)
			/* The target will handle the io */
			return;
		else if (r) {
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	/*
	 * Store md for cleanup instead of tio which is about to get freed.
	 */
	bio->bi_private = md->bs;

	free_tio(md, tio);
	bio_put(bio);
	dec_pending(io, error);
}

static sector_t max_io_len(struct mapped_device *md,
			   sector_t sector, struct dm_target *ti)
{
	sector_t offset = sector - ti->begin;
	sector_t len = ti->len - offset;

	/*
	 * Does the target need to split even further?
	 */
	if (ti->split_io) {
		sector_t boundary;
		boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
			   - offset;
		if (len > boundary)
			len = boundary;
	}

	return len;
}
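
/*
 * Worked example (assumes split_io is a power of two, which the mask
 * arithmetic above relies on): with ti->split_io = 8 and offset = 5,
 * boundary = ((5 + 8) & ~7) - 5 = 8 - 5 = 3, so the I/O is clipped to
 * end at the next 8-sector boundary within the target.
 */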

static void __map_bio(struct dm_target *ti, struct bio *clone,
		      struct dm_target_io *tio)
{
	int r;
	sector_t sector;
	struct mapped_device *md;

	/*
	 * Sanity checks.
	 */
	BUG_ON(!clone->bi_size);

	clone->bi_end_io = clone_endio;
	clone->bi_private = tio;

	/*
	 * Map the clone.  If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&tio->io->io_count);
	sector = clone->bi_sector;
	r = ti->type->map(ti, clone, &tio->info);
	if (r == DM_MAPIO_REMAPPED) {
		/* the bio has been remapped so dispatch it */

		trace_block_remap(bdev_get_queue(clone->bi_bdev), clone,
				  tio->io->bio->bi_bdev->bd_dev, sector);

		generic_make_request(clone);
	} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
		/* error the io and bail out, or requeue it if needed */
		md = tio->io->md;
		dec_pending(tio->io, r);
		/*
		 * Store bio_set for cleanup.
		 */
		clone->bi_private = md->bs;
		bio_put(clone);
		free_tio(md, tio);
	} else if (r) {
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}
}

struct clone_info {
	struct mapped_device *md;
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	sector_t sector_count;
	unsigned short idx;
};

static void dm_bio_destructor(struct bio *bio)
{
	struct bio_set *bs = bio->bi_private;

	bio_free(bio, bs);
}

/*
 * Creates a little bio that just does part of a bvec.
 */
static struct bio *split_bvec(struct bio *bio, sector_t sector,
			      unsigned short idx, unsigned int offset,
			      unsigned int len, struct bio_set *bs)
{
	struct bio *clone;
	struct bio_vec *bv = bio->bi_io_vec + idx;

	clone = bio_alloc_bioset(GFP_NOIO, 1, bs);
	clone->bi_destructor = dm_bio_destructor;
	*clone->bi_io_vec = *bv;

	clone->bi_sector = sector;
	clone->bi_bdev = bio->bi_bdev;
	clone->bi_rw = bio->bi_rw & ~(1 << BIO_RW_BARRIER);
	clone->bi_vcnt = 1;
	clone->bi_size = to_bytes(len);
	clone->bi_io_vec->bv_offset = offset;
	clone->bi_io_vec->bv_len = clone->bi_size;
	clone->bi_flags |= 1 << BIO_CLONED;

	if (bio_integrity(bio)) {
		bio_integrity_clone(clone, bio, GFP_NOIO);
		bio_integrity_trim(clone,
				   bio_sector_offset(bio, idx, offset), len);
	}

	return clone;
}

/*
 * Creates a bio that consists of a range of complete bvecs.
 */
static struct bio *clone_bio(struct bio *bio, sector_t sector,
			     unsigned short idx, unsigned short bv_count,
			     unsigned int len, struct bio_set *bs)
{
	struct bio *clone;

	clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
	__bio_clone(clone, bio);
	clone->bi_rw &= ~(1 << BIO_RW_BARRIER);
	clone->bi_destructor = dm_bio_destructor;
	clone->bi_sector = sector;
	clone->bi_idx = idx;
	clone->bi_vcnt = idx + bv_count;
	clone->bi_size = to_bytes(len);
	clone->bi_flags &= ~(1 << BIO_SEG_VALID);

	if (bio_integrity(bio)) {
		bio_integrity_clone(clone, bio, GFP_NOIO);

		if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
			bio_integrity_trim(clone,
					   bio_sector_offset(bio, idx, 0), len);
	}

	return clone;
}

static int __clone_and_map(struct clone_info *ci)
{
	struct bio *clone, *bio = ci->bio;
	struct dm_target *ti;
	sector_t len = 0, max;
	struct dm_target_io *tio;

	ti = dm_table_find_target(ci->map, ci->sector);
	if (!dm_target_is_valid(ti))
		return -EIO;

	max = max_io_len(ci->md, ci->sector, ti);

	/*
	 * Allocate a target io object.
	 */
	tio = alloc_tio(ci->md);
	tio->io = ci->io;
	tio->ti = ti;
	memset(&tio->info, 0, sizeof(tio->info));

	if (ci->sector_count <= max) {
		/*
		 * Optimise for the simple case where we can do all of
		 * the remaining io with a single clone.
		 */
		clone = clone_bio(bio, ci->sector, ci->idx,
				  bio->bi_vcnt - ci->idx, ci->sector_count,
				  ci->md->bs);
		__map_bio(ti, clone, tio);
		ci->sector_count = 0;

	} else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
		/*
		 * There are some bvecs that don't span targets.
		 * Do as many of these as possible.
		 */
		int i;
		sector_t remaining = max;
		sector_t bv_len;

		for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
			bv_len = to_sector(bio->bi_io_vec[i].bv_len);

			if (bv_len > remaining)
				break;

			remaining -= bv_len;
			len += bv_len;
		}

		clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len,
				  ci->md->bs);
		__map_bio(ti, clone, tio);

		ci->sector += len;
		ci->sector_count -= len;
		ci->idx = i;

	} else {
		/*
		 * Handle a bvec that must be split between two or more targets.
		 */
		struct bio_vec *bv = bio->bi_io_vec + ci->idx;
		sector_t remaining = to_sector(bv->bv_len);
		unsigned int offset = 0;

		do {
			if (offset) {
				ti = dm_table_find_target(ci->map, ci->sector);
				if (!dm_target_is_valid(ti))
					return -EIO;

				max = max_io_len(ci->md, ci->sector, ti);

				tio = alloc_tio(ci->md);
				tio->io = ci->io;
				tio->ti = ti;
				memset(&tio->info, 0, sizeof(tio->info));
			}

			len = min(remaining, max);

			clone = split_bvec(bio, ci->sector, ci->idx,
					   bv->bv_offset + offset, len,
					   ci->md->bs);

			__map_bio(ti, clone, tio);

			ci->sector += len;
			ci->sector_count -= len;
			offset += to_bytes(len);
		} while (remaining -= len);

		ci->idx++;
	}

	return 0;
}

/*
 * Split the bio into several clones and submit it to targets.
 */
static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
{
	struct clone_info ci;
	int error = 0;

	ci.map = dm_get_table(md);
	if (unlikely(!ci.map)) {
		if (!bio_barrier(bio))
			bio_io_error(bio);
		else
			md->barrier_error = -EIO;
		return;
	}

	ci.md = md;
	ci.bio = bio;
	ci.io = alloc_io(md);
	ci.io->error = 0;
	atomic_set(&ci.io->io_count, 1);
	ci.io->bio = bio;
	ci.io->md = md;
	ci.sector = bio->bi_sector;
	ci.sector_count = bio_sectors(bio);
	ci.idx = bio->bi_idx;

	start_io_acct(ci.io);
	while (ci.sector_count && !error)
		error = __clone_and_map(&ci);

	/* drop the extra reference count */
	dec_pending(ci.io, error);
	dm_table_put(ci.map);
}
/*-----------------------------------------------------------------
 * CRUD END
 *---------------------------------------------------------------*/

static int dm_merge_bvec(struct request_queue *q,
			 struct bvec_merge_data *bvm,
			 struct bio_vec *biovec)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);
	struct dm_target *ti;
	sector_t max_sectors;
	int max_size = 0;

	if (unlikely(!map))
		goto out;

	ti = dm_table_find_target(map, bvm->bi_sector);
	if (!dm_target_is_valid(ti))
		goto out_table;

	/*
	 * Find maximum amount of I/O that won't need splitting
	 */
	max_sectors = min(max_io_len(md, bvm->bi_sector, ti),
			  (sector_t) BIO_MAX_SECTORS);
	max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
	if (max_size < 0)
		max_size = 0;

	/*
	 * merge_bvec_fn() returns number of bytes
	 * it can accept at this offset
	 * max is precomputed maximal io size
	 */
	if (max_size && ti->type->merge)
		max_size = ti->type->merge(ti, bvm, biovec, max_size);

out_table:
	dm_table_put(map);

out:
	/*
	 * Always allow an entire first page
	 */
	if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
		max_size = biovec->bv_len;

	return max_size;
}
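
/*
 * Broadly, the block layer's merge_bvec_fn contract is: given the bio
 * built so far (bvm) and a candidate bio_vec, return how many bytes of
 * the candidate may be appended.  Returning biovec->bv_len accepts the
 * whole vec; returning less makes the caller start a new bio instead.
 */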

/*
 * The request function that just remaps the bio built up by
 * dm_merge_bvec.
 */
static int dm_request(struct request_queue *q, struct bio *bio)
{
	int rw = bio_data_dir(bio);
	struct mapped_device *md = q->queuedata;
	int cpu;

	down_read(&md->io_lock);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
	part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	/*
	 * If we're suspended or the thread is processing barriers
	 * we have to queue this io for later.
	 */
	if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) ||
	    unlikely(bio_barrier(bio))) {
		up_read(&md->io_lock);

		if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) &&
		    bio_rw(bio) == READA) {
			bio_io_error(bio);
			return 0;
		}

		queue_io(md, bio);

		return 0;
	}

	__split_and_process_bio(md, bio);
	up_read(&md->io_lock);
	return 0;
}

static void dm_unplug_all(struct request_queue *q)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);

	if (map) {
		dm_table_unplug_all(map);
		dm_table_put(map);
	}
}

static int dm_any_congested(void *congested_data, int bdi_bits)
{
	int r = bdi_bits;
	struct mapped_device *md = congested_data;
	struct dm_table *map;

	if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
		map = dm_get_table(md);
		if (map) {
			r = dm_table_any_congested(map, bdi_bits);
			dm_table_put(map);
		}
	}

	return r;
}

/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static DEFINE_IDR(_minor_idr);

static void free_minor(int minor)
{
	spin_lock(&_minor_lock);
	idr_remove(&_minor_idr, minor);
	spin_unlock(&_minor_lock);
}

/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(int minor)
{
	int r, m;

	if (minor >= (1 << MINORBITS))
		return -EINVAL;

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&_minor_lock);

	if (idr_find(&_minor_idr, minor)) {
		r = -EBUSY;
		goto out;
	}

	r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m);
	if (r)
		goto out;

	if (m != minor) {
		idr_remove(&_minor_idr, m);
		r = -EBUSY;
		goto out;
	}

out:
	spin_unlock(&_minor_lock);
	return r;
}
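
/*
 * Both minor allocators use the two-step IDR API of this era:
 * idr_pre_get() preallocates outside the spinlock (returning 0 on
 * failure), then idr_get_new*() inserts the MINOR_ALLOCED placeholder
 * under _minor_lock.  alloc_dev() later swaps in the real mapped_device
 * with idr_replace() once the device is fully constructed.
 */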

static int next_free_minor(int *minor)
{
	int r, m;

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&_minor_lock);

	r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
	if (r)
		goto out;

	if (m >= (1 << MINORBITS)) {
		idr_remove(&_minor_idr, m);
		r = -ENOSPC;
		goto out;
	}

	*minor = m;

out:
	spin_unlock(&_minor_lock);
	return r;
}

static struct block_device_operations dm_blk_dops;

static void dm_wq_work(struct work_struct *work);

/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(int minor)
{
	int r;
	struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
	void *old_md;

	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	if (!try_module_get(THIS_MODULE))
		goto bad_module_get;

	/* get a minor number for the dev */
	if (minor == DM_ANY_MINOR)
		r = next_free_minor(&minor);
	else
		r = specific_minor(minor);
	if (r < 0)
		goto bad_minor;

	init_rwsem(&md->io_lock);
	mutex_init(&md->suspend_lock);
	spin_lock_init(&md->deferred_lock);
	rwlock_init(&md->map_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->open_count, 0);
	atomic_set(&md->event_nr, 0);
	atomic_set(&md->uevent_seq, 0);
	INIT_LIST_HEAD(&md->uevent_list);
	spin_lock_init(&md->uevent_lock);

	md->queue = blk_alloc_queue(GFP_KERNEL);
	if (!md->queue)
		goto bad_queue;

	md->queue->queuedata = md;
	md->queue->backing_dev_info.congested_fn = dm_any_congested;
	md->queue->backing_dev_info.congested_data = md;
	blk_queue_make_request(md->queue, dm_request);
	blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN, NULL);
	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
	md->queue->unplug_fn = dm_unplug_all;
	blk_queue_merge_bvec(md->queue, dm_merge_bvec);

	md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
	if (!md->io_pool)
		goto bad_io_pool;

	md->tio_pool = mempool_create_slab_pool(MIN_IOS, _tio_cache);
	if (!md->tio_pool)
		goto bad_tio_pool;

	md->bs = bioset_create(16, 0);
	if (!md->bs)
		goto bad_no_bioset;

	md->disk = alloc_disk(1);
	if (!md->disk)
		goto bad_disk;

	atomic_set(&md->pending, 0);
	init_waitqueue_head(&md->wait);
	INIT_WORK(&md->work, dm_wq_work);
	init_waitqueue_head(&md->eventq);

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);
	add_disk(md->disk);
	format_dev_t(md->name, MKDEV(_major, minor));

	md->wq = create_singlethread_workqueue("kdmflush");
	if (!md->wq)
		goto bad_thread;

	/* Populate the mapping, nobody knows we exist yet */
	spin_lock(&_minor_lock);
	old_md = idr_replace(&_minor_idr, md, minor);
	spin_unlock(&_minor_lock);

	BUG_ON(old_md != MINOR_ALLOCED);

	return md;

bad_thread:
	put_disk(md->disk);
bad_disk:
	bioset_free(md->bs);
bad_no_bioset:
	mempool_destroy(md->tio_pool);
bad_tio_pool:
	mempool_destroy(md->io_pool);
bad_io_pool:
	blk_cleanup_queue(md->queue);
bad_queue:
	free_minor(minor);
bad_minor:
	module_put(THIS_MODULE);
bad_module_get:
	kfree(md);
	return NULL;
}

static void unlock_fs(struct mapped_device *md);

static void free_dev(struct mapped_device *md)
{
	int minor = MINOR(disk_devt(md->disk));

	if (md->suspended_bdev) {
		unlock_fs(md);
		bdput(md->suspended_bdev);
	}
	destroy_workqueue(md->wq);
	mempool_destroy(md->tio_pool);
	mempool_destroy(md->io_pool);
	bioset_free(md->bs);
	blk_integrity_unregister(md->disk);
	del_gendisk(md->disk);
	free_minor(minor);

	spin_lock(&_minor_lock);
	md->disk->private_data = NULL;
	spin_unlock(&_minor_lock);

	put_disk(md->disk);
	blk_cleanup_queue(md->queue);
	module_put(THIS_MODULE);
	kfree(md);
}

/*
 * Bind a table to the device.
 */
static void event_callback(void *context)
{
	unsigned long flags;
	LIST_HEAD(uevents);
	struct mapped_device *md = (struct mapped_device *) context;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_splice_init(&md->uevent_list, &uevents);
	spin_unlock_irqrestore(&md->uevent_lock, flags);

	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);

	atomic_inc(&md->event_nr);
	wake_up(&md->eventq);
}

static void __set_size(struct mapped_device *md, sector_t size)
{
	set_capacity(md->disk, size);

	mutex_lock(&md->suspended_bdev->bd_inode->i_mutex);
	i_size_write(md->suspended_bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
	mutex_unlock(&md->suspended_bdev->bd_inode->i_mutex);
}

static int __bind(struct mapped_device *md, struct dm_table *t)
{
	struct request_queue *q = md->queue;
	sector_t size;

	size = dm_table_get_size(t);

	/*
	 * Wipe any geometry if the size of the table changed.
	 */
	if (size != get_capacity(md->disk))
		memset(&md->geometry, 0, sizeof(md->geometry));

	if (md->suspended_bdev)
		__set_size(md, size);

	if (!size) {
		dm_table_destroy(t);
		return 0;
	}

	dm_table_event_callback(t, event_callback, md);

	write_lock(&md->map_lock);
	md->map = t;
	dm_table_set_restrictions(t, q);
	write_unlock(&md->map_lock);

	return 0;
}

static void __unbind(struct mapped_device *md)
{
	struct dm_table *map = md->map;

	if (!map)
		return;

	dm_table_event_callback(map, NULL, NULL);
	write_lock(&md->map_lock);
	md->map = NULL;
	write_unlock(&md->map_lock);
	dm_table_destroy(map);
}

/*
 * Constructor for a new device.
 */
int dm_create(int minor, struct mapped_device **result)
{
	struct mapped_device *md;

	md = alloc_dev(minor);
	if (!md)
		return -ENXIO;

	dm_sysfs_init(md);

	*result = md;
	return 0;
}

static struct mapped_device *dm_find_md(dev_t dev)
{
	struct mapped_device *md;
	unsigned minor = MINOR(dev);

	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
		return NULL;

	spin_lock(&_minor_lock);

	md = idr_find(&_minor_idr, minor);
	if (md && (md == MINOR_ALLOCED ||
		   (MINOR(disk_devt(dm_disk(md))) != minor) ||
		   test_bit(DMF_FREEING, &md->flags))) {
		md = NULL;
		goto out;
	}

out:
	spin_unlock(&_minor_lock);

	return md;
}

struct mapped_device *dm_get_md(dev_t dev)
{
	struct mapped_device *md = dm_find_md(dev);

	if (md)
		dm_get(md);

	return md;
}

void *dm_get_mdptr(struct mapped_device *md)
{
	return md->interface_ptr;
}

void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
	md->interface_ptr = ptr;
}

void dm_get(struct mapped_device *md)
{
	atomic_inc(&md->holders);
}

const char *dm_device_name(struct mapped_device *md)
{
	return md->name;
}
EXPORT_SYMBOL_GPL(dm_device_name);

void dm_put(struct mapped_device *md)
{
	struct dm_table *map;

	BUG_ON(test_bit(DMF_FREEING, &md->flags));

	if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
		map = dm_get_table(md);
		idr_replace(&_minor_idr, MINOR_ALLOCED,
			    MINOR(disk_devt(dm_disk(md))));
		set_bit(DMF_FREEING, &md->flags);
		spin_unlock(&_minor_lock);
		if (!dm_suspended(md)) {
			dm_table_presuspend_targets(map);
			dm_table_postsuspend_targets(map);
		}
		dm_sysfs_exit(md);
		dm_table_put(map);
		__unbind(md);
		free_dev(md);
	}
}
EXPORT_SYMBOL_GPL(dm_put);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001399
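/*
 * Wait for all in-flight I/O against md to drain. md->pending counts
 * outstanding I/Os and dec_pending() wakes md->wait when it reaches
 * zero. With TASK_INTERRUPTIBLE a pending signal aborts the wait and
 * -EINTR is returned.
 */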
static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
{
	int r = 0;
	DECLARE_WAITQUEUE(wait, current);

	dm_unplug_all(md->queue);

	add_wait_queue(&md->wait, &wait);

	while (1) {
		set_current_state(interruptible);

		smp_mb();
		if (!atomic_read(&md->pending))
			break;

		if (interruptible == TASK_INTERRUPTIBLE &&
		    signal_pending(current)) {
			r = -EINTR;
			break;
		}

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	remove_wait_queue(&md->wait, &wait);

	return r;
}
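
/*
 * The wake-up half of that handshake lives in dec_pending() earlier in
 * this file; roughly (sketch, see dec_pending() for the real code):
 *
 *	if (!atomic_dec_return(&md->pending))
 *		wake_up(&md->wait);
 */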

static int dm_flush(struct mapped_device *md)
{
	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
	return 0;
}

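/*
 * Handle a barrier bio taken off the deferred list: flush everything
 * issued before it, submit the bio's own payload (if any), then flush
 * again so the ordering guarantee holds. md->barrier_error records any
 * failure seen while the barrier was in flight; DM_ENDIO_REQUEUE means
 * the bio has been requeued and must not be completed here.
 */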
static void process_barrier(struct mapped_device *md, struct bio *bio)
{
	int error = dm_flush(md);

	if (unlikely(error)) {
		bio_endio(bio, error);
		return;
	}
	if (bio_empty_barrier(bio)) {
		bio_endio(bio, 0);
		return;
	}

	__split_and_process_bio(md, bio);

	error = dm_flush(md);

	if (!error && md->barrier_error)
		error = md->barrier_error;

	if (md->barrier_error != DM_ENDIO_REQUEUE)
		bio_endio(bio, error);
}

/*
 * Process the deferred bios
 */
static void dm_wq_work(struct work_struct *work)
{
	struct mapped_device *md = container_of(work, struct mapped_device,
						work);
	struct bio *c;

	down_write(&md->io_lock);

	while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
		spin_lock_irq(&md->deferred_lock);
		c = bio_list_pop(&md->deferred);
		spin_unlock_irq(&md->deferred_lock);

		if (!c) {
			clear_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags);
			break;
		}

		up_write(&md->io_lock);

		if (bio_barrier(c))
			process_barrier(md, c);
		else
			__split_and_process_bio(md, c);

		down_write(&md->io_lock);
	}

	up_write(&md->io_lock);
}

static void dm_queue_flush(struct mapped_device *md)
{
	clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	smp_mb__after_clear_bit();
	queue_work(md->wq, &md->work);
}
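
/*
 * For reference, the queueing side (queue_io(), earlier in this file)
 * adds bios under the same lock, roughly (sketch):
 *
 *	spin_lock_irq(&md->deferred_lock);
 *	bio_list_add(&md->deferred, bio);
 *	spin_unlock_irq(&md->deferred_lock);
 *
 * dm_queue_flush() clears DMF_BLOCK_IO_FOR_SUSPEND and kicks the
 * workqueue so dm_wq_work() resubmits those deferred bios; the memory
 * barrier orders the flag clear before the work is queued.
 */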

/*
 * Swap in a new table (destroying old one).
 */
int dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
	int r = -EINVAL;

	mutex_lock(&md->suspend_lock);

	/* device must be suspended */
	if (!dm_suspended(md))
		goto out;

	/* without bdev, the device size cannot be changed */
	if (!md->suspended_bdev)
		if (get_capacity(md->disk) != dm_table_get_size(table))
			goto out;

	__unbind(md);
	r = __bind(md, table);

out:
	mutex_unlock(&md->suspend_lock);
	return r;
}
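
/*
 * Typical lifecycle around a table swap (sketch; the real sequence is
 * driven from the ioctl layer, and error handling is elided):
 *
 *	r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
 *	if (!r) {
 *		r = dm_swap_table(md, new_table);
 *		dm_resume(md);
 *	}
 *
 * "new_table" here is a hypothetical, already-constructed dm_table.
 */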

/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int lock_fs(struct mapped_device *md)
{
	int r;

	WARN_ON(md->frozen_sb);

	md->frozen_sb = freeze_bdev(md->suspended_bdev);
	if (IS_ERR(md->frozen_sb)) {
		r = PTR_ERR(md->frozen_sb);
		md->frozen_sb = NULL;
		return r;
	}

	set_bit(DMF_FROZEN, &md->flags);

	/*
	 * Don't bdput right now; we don't want the bdev to go away
	 * while it is locked.
	 */
	return 0;
}

static void unlock_fs(struct mapped_device *md)
{
	if (!test_bit(DMF_FROZEN, &md->flags))
		return;

	thaw_bdev(md->suspended_bdev, md->frozen_sb);
	md->frozen_sb = NULL;
	clear_bit(DMF_FROZEN, &md->flags);
}

/*
 * We need to be able to change a mapping table under a mounted
 * filesystem. For example, we might want to move some data in
 * the background. Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in-flight
 * bios and ensure that any further I/O gets deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;
	int r = 0;
	int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
	int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;

	mutex_lock(&md->suspend_lock);

	if (dm_suspended(md)) {
		r = -EINVAL;
		goto out_unlock;
	}

	map = dm_get_table(md);

	/*
	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
	 * This flag is cleared before dm_suspend returns.
	 */
	if (noflush)
		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);

	/* This does not get reverted if there's an error later. */
	dm_table_presuspend_targets(map);

	/* bdget() can stall if the pending I/Os are not flushed */
	if (!noflush) {
		md->suspended_bdev = bdget_disk(md->disk, 0);
		if (!md->suspended_bdev) {
			DMWARN("bdget failed in dm_suspend");
			r = -ENOMEM;
			goto out;
		}

		/*
		 * Flush I/O to the device. noflush supersedes do_lockfs,
		 * because lock_fs() needs to flush I/Os.
		 */
		if (do_lockfs) {
			r = lock_fs(md);
			if (r)
				goto out;
		}
	}

	/*
	 * Here we must make sure that no processes are submitting requests
	 * to target drivers i.e. no one may be executing
	 * __split_and_process_bio. This is called from dm_request and
	 * dm_wq_work.
	 *
	 * To get all processes out of __split_and_process_bio in dm_request,
	 * we take the write lock. To prevent any process from reentering
	 * __split_and_process_bio from dm_request, we set
	 * DMF_QUEUE_IO_TO_THREAD.
	 *
	 * To quiesce the thread (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND
	 * and call flush_workqueue(md->wq). flush_workqueue will wait until
	 * dm_wq_work exits and DMF_BLOCK_IO_FOR_SUSPEND will prevent any
	 * further calls to __split_and_process_bio from dm_wq_work.
	 */
	down_write(&md->io_lock);
	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags);
	up_write(&md->io_lock);

	flush_workqueue(md->wq);

	/*
	 * At this point no more requests are entering target request routines.
	 * We call dm_wait_for_completion to wait for all existing requests
	 * to finish.
	 */
	r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE);

	down_write(&md->io_lock);
	if (noflush)
		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
	up_write(&md->io_lock);

	/* were we interrupted? */
	if (r < 0) {
		dm_queue_flush(md);

		unlock_fs(md);
		goto out; /* pushback list is already flushed, so skip flush */
	}

	/*
	 * If dm_wait_for_completion returned 0, the device is completely
	 * quiescent now. There is no request-processing activity. All new
	 * requests are being added to md->deferred list.
	 */

	dm_table_postsuspend_targets(map);

	set_bit(DMF_SUSPENDED, &md->flags);

out:
	if (r && md->suspended_bdev) {
		bdput(md->suspended_bdev);
		md->suspended_bdev = NULL;
	}

	dm_table_put(map);

out_unlock:
	mutex_unlock(&md->suspend_lock);
	return r;
}

int dm_resume(struct mapped_device *md)
{
	int r = -EINVAL;
	struct dm_table *map = NULL;

	mutex_lock(&md->suspend_lock);
	if (!dm_suspended(md))
		goto out;

	map = dm_get_table(md);
	if (!map || !dm_table_get_size(map))
		goto out;

	r = dm_table_resume_targets(map);
	if (r)
		goto out;

	dm_queue_flush(md);

	unlock_fs(md);

	if (md->suspended_bdev) {
		bdput(md->suspended_bdev);
		md->suspended_bdev = NULL;
	}

	clear_bit(DMF_SUSPENDED, &md->flags);

	dm_table_unplug_all(map);

	dm_kobject_uevent(md);

	r = 0;

out:
	dm_table_put(map);
	mutex_unlock(&md->suspend_lock);

	return r;
}
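
/*
 * Flag usage note (sketch): a caller that must not flush queued I/O,
 * e.g. multipath while all paths are down, would pass
 * DM_SUSPEND_NOFLUSH_FLAG to dm_suspend(); the common ioctl path passes
 * DM_SUSPEND_LOCKFS_FLAG so a mounted filesystem is frozen first.
 */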

/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
void dm_kobject_uevent(struct mapped_device *md)
{
	kobject_uevent(&disk_to_dev(md->disk)->kobj, KOBJ_CHANGE);
}

uint32_t dm_next_uevent_seq(struct mapped_device *md)
{
	return atomic_add_return(1, &md->uevent_seq);
}

uint32_t dm_get_event_nr(struct mapped_device *md)
{
	return atomic_read(&md->event_nr);
}

int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}
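
/*
 * Intended pattern (sketch; the real consumer is the DM_DEV_WAIT
 * ioctl): sample the counter, inspect state, then sleep until the
 * counter moves on:
 *
 *	uint32_t nr = dm_get_event_nr(md);
 *
 *	...inspect device state...
 *	if (dm_wait_event(md, nr))
 *		return -ERESTARTSYS;	(interrupted by a signal)
 */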

void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
{
	unsigned long flags;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_add(elist, &md->uevent_list);
	spin_unlock_irqrestore(&md->uevent_lock, flags);
}

/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}

struct kobject *dm_kobject(struct mapped_device *md)
{
	return &md->kobj;
}

/*
 * struct mapped_device should not be exported outside of dm.c,
 * so use this check to verify that the kobj is part of the md
 * structure.
 */
struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
{
	struct mapped_device *md;

	md = container_of(kobj, struct mapped_device, kobj);
	if (&md->kobj != kobj)
		return NULL;

	dm_get(md);
	return md;
}
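
/*
 * Sketch of the expected sysfs usage (the real caller is dm-sysfs.c):
 * a show method recovers the owning device from the embedded kobject
 * and must drop the reference taken here:
 *
 *	struct mapped_device *md = dm_get_from_kobject(kobj);
 *
 *	if (!md)
 *		return -EINVAL;
 *	...format the attribute...
 *	dm_put(md);
 */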

int dm_suspended(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}

int dm_noflush_suspending(struct dm_target *ti)
{
	struct mapped_device *md = dm_table_get_md(ti->table);
	int r = __noflush_suspending(md);

	dm_put(md);

	return r;
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);
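
/*
 * Typical target-side use (sketch modelled on dm-mpath): in an endio
 * path, requeue rather than fail I/O while a noflush suspend is in
 * progress:
 *
 *	if (error && dm_noflush_suspending(ti))
 *		return DM_ENDIO_REQUEUE;
 */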

static struct block_device_operations dm_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.getgeo = dm_blk_getgeo,
	.owner = THIS_MODULE
};

EXPORT_SYMBOL(dm_get_mapinfo);

/*
 * module hooks
 */
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");
MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");