/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-bio-record.h"

#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-dirty-log.h>
#include <linux/dm-kcopyd.h>
#include <linux/dm-region-hash.h>

#define DM_MSG_PREFIX "raid1"

#define MAX_RECOVERY 1	/* Maximum number of regions recovered in parallel. */
#define DM_KCOPYD_PAGES 64

#define DM_RAID1_HANDLE_ERRORS 0x01
#define errors_handled(p)	((p)->features & DM_RAID1_HANDLE_ERRORS)

static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);

/*-----------------------------------------------------------------
 * Mirror set structures.
 *---------------------------------------------------------------*/
enum dm_raid1_error {
	DM_RAID1_WRITE_ERROR,
	DM_RAID1_FLUSH_ERROR,
	DM_RAID1_SYNC_ERROR,
	DM_RAID1_READ_ERROR
};

struct mirror {
	struct mirror_set *ms;
	atomic_t error_count;
	unsigned long error_type;
	struct dm_dev *dev;
	sector_t offset;
};

struct mirror_set {
	struct dm_target *ti;
	struct list_head list;

	uint64_t features;

	spinlock_t lock;	/* protects the lists */
	struct bio_list reads;
	struct bio_list writes;
	struct bio_list failures;
	struct bio_list holds;	/* bios are waiting until suspend */

	struct dm_region_hash *rh;
	struct dm_kcopyd_client *kcopyd_client;
	struct dm_io_client *io_client;
	mempool_t *read_record_pool;

	/* recovery */
	region_t nr_regions;
	int in_sync;
	int log_failure;
	int leg_failure;
	atomic_t suspend;

	atomic_t default_mirror;	/* Default mirror */

	struct workqueue_struct *kmirrord_wq;
	struct work_struct kmirrord_work;
	struct timer_list timer;
	unsigned long timer_pending;

	struct work_struct trigger_event;

	unsigned nr_mirrors;
	struct mirror mirror[0];
};

static void wakeup_mirrord(void *context)
{
	struct mirror_set *ms = context;

	queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
}
static void delayed_wake_fn(unsigned long data)
{
	struct mirror_set *ms = (struct mirror_set *) data;

	clear_bit(0, &ms->timer_pending);
	wakeup_mirrord(ms);
}

static void delayed_wake(struct mirror_set *ms)
{
	if (test_and_set_bit(0, &ms->timer_pending))
		return;

	ms->timer.expires = jiffies + HZ / 5;
	ms->timer.data = (unsigned long) ms;
	ms->timer.function = delayed_wake_fn;
	add_timer(&ms->timer);
}

static void wakeup_all_recovery_waiters(void *context)
{
	wake_up_all(&_kmirrord_recovery_stopped);
}
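
/*
 * Queue a bio on the read or write list and wake kmirrord, but only if
 * the list was empty beforehand: a non-empty list means a wakeup has
 * already been issued.
 */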
static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
{
	unsigned long flags;
	int should_wake = 0;
	struct bio_list *bl;

	bl = (rw == WRITE) ? &ms->writes : &ms->reads;
	spin_lock_irqsave(&ms->lock, flags);
	should_wake = !(bl->head);
	bio_list_add(bl, bio);
	spin_unlock_irqrestore(&ms->lock, flags);

	if (should_wake)
		wakeup_mirrord(ms);
}

static void dispatch_bios(void *context, struct bio_list *bio_list)
{
	struct mirror_set *ms = context;
	struct bio *bio;

	while ((bio = bio_list_pop(bio_list)))
		queue_bio(ms, bio, WRITE);
}
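
/*
 * A read record saves the chosen mirror and the original bio fields so
 * that mirror_end_io() can restore the bio and retry a failed read on
 * another in-sync leg.
 */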
#define MIN_READ_RECORDS 20
struct dm_raid1_read_record {
	struct mirror *m;
	struct dm_bio_details details;
};

static struct kmem_cache *_dm_raid1_read_record_cache;

/*
 * Every mirror should look like this one.
 */
#define DEFAULT_MIRROR 0

/*
 * This is yucky.  We squirrel the mirror struct away inside
 * bi_next for read/write buffers.  This is safe since the bh
 * doesn't get submitted to the lower levels of the block layer.
 */
static struct mirror *bio_get_m(struct bio *bio)
{
	return (struct mirror *) bio->bi_next;
}

static void bio_set_m(struct bio *bio, struct mirror *m)
{
	bio->bi_next = (struct bio *) m;
}

static struct mirror *get_default_mirror(struct mirror_set *ms)
{
	return &ms->mirror[atomic_read(&ms->default_mirror)];
}

static void set_default_mirror(struct mirror *m)
{
	struct mirror_set *ms = m->ms;
	struct mirror *m0 = &(ms->mirror[0]);

	atomic_set(&ms->default_mirror, m - m0);
}

static struct mirror *get_valid_mirror(struct mirror_set *ms)
{
	struct mirror *m;

	for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
		if (!atomic_read(&m->error_count))
			return m;

	return NULL;
}

/* fail_mirror
 * @m: mirror device to fail
 * @error_type: one of the enum values, DM_RAID1_*_ERROR
 *
 * If errors are being handled, record the type of
 * error encountered for this device.  If this type
 * of error has already been recorded, we can return;
 * otherwise, we must signal userspace by triggering
 * an event.  Additionally, if the device is the
 * primary device, we must choose a new primary, but
 * only if the mirror is in-sync.
 *
 * This function must not block.
 */
static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
{
	struct mirror_set *ms = m->ms;
	struct mirror *new;

	ms->leg_failure = 1;

	/*
	 * error_count is used for nothing more than a
	 * simple way to tell if a device has encountered
	 * errors.
	 */
	atomic_inc(&m->error_count);

	if (test_and_set_bit(error_type, &m->error_type))
		return;

	if (!errors_handled(ms))
		return;

	if (m != get_default_mirror(ms))
		goto out;

	if (!ms->in_sync) {
		/*
		 * Better to issue requests to the same failing device
		 * than to risk returning corrupt data.
		 */
		DMERR("Primary mirror (%s) failed while out-of-sync: "
		      "Reads may fail.", m->dev->name);
		goto out;
	}

	new = get_valid_mirror(ms);
	if (new)
		set_default_mirror(new);
	else
		DMWARN("All sides of mirror have failed.");

out:
	schedule_work(&ms->trigger_event);
}
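
/*
 * Send an empty flush to every leg.  This is the flush callback passed
 * to dm_dirty_log_create() in create_dirty_log(); any leg that fails
 * the flush is marked with DM_RAID1_FLUSH_ERROR.
 */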
static int mirror_flush(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	unsigned long error_bits;

	unsigned int i;
	struct dm_io_region io[ms->nr_mirrors];
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_rw = WRITE_FLUSH,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.addr = NULL,
		.client = ms->io_client,
	};

	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++) {
		io[i].bdev = m->dev->bdev;
		io[i].sector = 0;
		io[i].count = 0;
	}

	error_bits = -1;
	dm_io(&io_req, ms->nr_mirrors, io, &error_bits);
	if (unlikely(error_bits != 0)) {
		for (i = 0; i < ms->nr_mirrors; i++)
			if (test_bit(i, &error_bits))
				fail_mirror(ms->mirror + i,
					    DM_RAID1_FLUSH_ERROR);
		return -EIO;
	}

	return 0;
}

/*-----------------------------------------------------------------
 * Recovery.
 *
 * When a mirror is first activated we may find that some regions
 * are in the no-sync state.  We have to recover these by
 * recopying from the default mirror to all the others.
 *---------------------------------------------------------------*/
static void recovery_complete(int read_err, unsigned long write_err,
			      void *context)
{
	struct dm_region *reg = context;
	struct mirror_set *ms = dm_rh_region_context(reg);
	int m, bit = 0;

	if (read_err) {
		/* Read error means the failure of default mirror. */
		DMERR_LIMIT("Unable to read primary mirror during recovery");
		fail_mirror(get_default_mirror(ms), DM_RAID1_SYNC_ERROR);
	}

	if (write_err) {
		DMERR_LIMIT("Write error during recovery (error = 0x%lx)",
			    write_err);
		/*
		 * Bits correspond to devices (excluding default mirror).
		 * The default mirror cannot change during recovery.
		 */
		for (m = 0; m < ms->nr_mirrors; m++) {
			if (&ms->mirror[m] == get_default_mirror(ms))
				continue;
			if (test_bit(bit, &write_err))
				fail_mirror(ms->mirror + m,
					    DM_RAID1_SYNC_ERROR);
			bit++;
		}
	}

	dm_rh_recovery_end(reg, !(read_err || write_err));
}
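
/*
 * Copy one quiesced region from the default mirror to every other leg
 * via kcopyd; recovery_complete() runs when the copy finishes.
 */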
static int recover(struct mirror_set *ms, struct dm_region *reg)
{
	int r;
	unsigned i;
	struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest;
	struct mirror *m;
	unsigned long flags = 0;
	region_t key = dm_rh_get_region_key(reg);
	sector_t region_size = dm_rh_get_region_size(ms->rh);

	/* fill in the source */
	m = get_default_mirror(ms);
	from.bdev = m->dev->bdev;
	from.sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
	if (key == (ms->nr_regions - 1)) {
		/*
		 * The final region may be smaller than
		 * region_size.
		 */
		from.count = ms->ti->len & (region_size - 1);
		if (!from.count)
			from.count = region_size;
	} else
		from.count = region_size;

	/* fill in the destinations */
	for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
		if (&ms->mirror[i] == get_default_mirror(ms))
			continue;

		m = ms->mirror + i;
		dest->bdev = m->dev->bdev;
		dest->sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
		dest->count = from.count;
		dest++;
	}

	/* hand to kcopyd */
	if (!errors_handled(ms))
		set_bit(DM_KCOPYD_IGNORE_ERROR, &flags);

	r = dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to,
			   flags, recovery_complete, reg);

	return r;
}

static void do_recovery(struct mirror_set *ms)
{
	struct dm_region *reg;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	int r;

	/*
	 * Start quiescing some regions.
	 */
	dm_rh_recovery_prepare(ms->rh);

	/*
	 * Copy any already quiesced regions.
	 */
	while ((reg = dm_rh_recovery_start(ms->rh))) {
		r = recover(ms, reg);
		if (r)
			dm_rh_recovery_end(reg, 0);
	}

	/*
	 * Update the in sync flag.
	 */
	if (!ms->in_sync &&
	    (log->type->get_sync_count(log) == ms->nr_regions)) {
		/* the sync is complete */
		dm_table_event(ms->ti->table);
		ms->in_sync = 1;
	}
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
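/*
 * Walk backwards from the current default mirror (wrapping around)
 * until a leg with no recorded errors is found; returns NULL if every
 * leg has failed.
 */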
static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
{
	struct mirror *m = get_default_mirror(ms);

	do {
		if (likely(!atomic_read(&m->error_count)))
			return m;

		if (m-- == ms->mirror)
			m += ms->nr_mirrors;
	} while (m != get_default_mirror(ms));

	return NULL;
}

static int default_ok(struct mirror *m)
{
	struct mirror *default_mirror = get_default_mirror(m->ms);

	return !atomic_read(&default_mirror->error_count);
}

static int mirror_available(struct mirror_set *ms, struct bio *bio)
{
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	region_t region = dm_rh_bio_to_region(ms->rh, bio);

	if (log->type->in_sync(log, region, 0))
		return choose_mirror(ms, bio->bi_sector) ? 1 : 0;

	return 0;
}

/*
 * remap a buffer to a particular mirror.
 */
static sector_t map_sector(struct mirror *m, struct bio *bio)
{
	if (unlikely(!bio->bi_size))
		return 0;
	return m->offset + dm_target_offset(m->ms->ti, bio->bi_sector);
}

static void map_bio(struct mirror *m, struct bio *bio)
{
	bio->bi_bdev = m->dev->bdev;
	bio->bi_sector = map_sector(m, bio);
}

static void map_region(struct dm_io_region *io, struct mirror *m,
		       struct bio *bio)
{
	io->bdev = m->dev->bdev;
	io->sector = map_sector(m, bio);
	io->count = bio->bi_size >> 9;
}

static void hold_bio(struct mirror_set *ms, struct bio *bio)
{
	/*
	 * Lock is required to avoid race condition during suspend
	 * process.
	 */
	spin_lock_irq(&ms->lock);

	if (atomic_read(&ms->suspend)) {
		spin_unlock_irq(&ms->lock);

		/*
		 * If device is suspended, complete the bio.
		 */
		if (dm_noflush_suspending(ms->ti))
			bio_endio(bio, DM_ENDIO_REQUEUE);
		else
			bio_endio(bio, -EIO);
		return;
	}

	/*
	 * Hold bio until the suspend is complete.
	 */
	bio_list_add(&ms->holds, bio);
	spin_unlock_irq(&ms->lock);
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
static void read_callback(unsigned long error, void *context)
{
	struct bio *bio = context;
	struct mirror *m;

	m = bio_get_m(bio);
	bio_set_m(bio, NULL);

	if (likely(!error)) {
		bio_endio(bio, 0);
		return;
	}

	fail_mirror(m, DM_RAID1_READ_ERROR);

	if (likely(default_ok(m)) || mirror_available(m->ms, bio)) {
		DMWARN_LIMIT("Read failure on mirror device %s.  "
			     "Trying alternative device.",
			     m->dev->name);
		queue_bio(m->ms, bio, bio_rw(bio));
		return;
	}

	DMERR_LIMIT("Read failure on mirror device %s.  Failing I/O.",
		    m->dev->name);
	bio_endio(bio, -EIO);
}

/* Asynchronous read. */
static void read_async_bio(struct mirror *m, struct bio *bio)
{
	struct dm_io_region io;
	struct dm_io_request io_req = {
		.bi_rw = READ,
		.mem.type = DM_IO_BVEC,
		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
		.notify.fn = read_callback,
		.notify.context = bio,
		.client = m->ms->io_client,
	};

	map_region(&io, m, bio);
	bio_set_m(bio, m);
	BUG_ON(dm_io(&io_req, 1, &io, NULL));
}

static inline int region_in_sync(struct mirror_set *ms, region_t region,
				 int may_block)
{
	int state = dm_rh_get_state(ms->rh, region, may_block);
	return state == DM_RH_CLEAN || state == DM_RH_DIRTY;
}

static void do_reads(struct mirror_set *ms, struct bio_list *reads)
{
	region_t region;
	struct bio *bio;
	struct mirror *m;

	while ((bio = bio_list_pop(reads))) {
		region = dm_rh_bio_to_region(ms->rh, bio);
		m = get_default_mirror(ms);

		/*
		 * We can only read balance if the region is in sync.
		 */
		if (likely(region_in_sync(ms, region, 1)))
			m = choose_mirror(ms, bio->bi_sector);
		else if (m && atomic_read(&m->error_count))
			m = NULL;

		if (likely(m))
			read_async_bio(m, bio);
		else
			bio_endio(bio, -EIO);
	}
}

/*-----------------------------------------------------------------
 * Writes.
 *
 * We do different things with the write io depending on the
 * state of the region that it's in:
 *
 * SYNC:	increment pending, use kcopyd to write to *all* mirrors
 * RECOVERING:	delay the io until recovery completes
 * NOSYNC:	increment pending, just write to the default mirror
 *---------------------------------------------------------------*/

static void write_callback(unsigned long error, void *context)
{
	unsigned i, ret = 0;
	struct bio *bio = (struct bio *) context;
	struct mirror_set *ms;
	int should_wake = 0;
	unsigned long flags;

	ms = bio_get_m(bio)->ms;
	bio_set_m(bio, NULL);

	/*
	 * NOTE: We don't decrement the pending count here,
	 * instead it is done by the target's endio function.
	 * This way we handle both writes to SYNC and NOSYNC
	 * regions with the same code.
	 */
	if (likely(!error)) {
		bio_endio(bio, ret);
		return;
	}

	for (i = 0; i < ms->nr_mirrors; i++)
		if (test_bit(i, &error))
			fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);

	/*
	 * Need to raise event.  Since raising
	 * events can block, we need to do it in
	 * the main thread.
	 */
	spin_lock_irqsave(&ms->lock, flags);
	if (!ms->failures.head)
		should_wake = 1;
	bio_list_add(&ms->failures, bio);
	spin_unlock_irqrestore(&ms->lock, flags);
	if (should_wake)
		wakeup_mirrord(ms);
}

static void do_write(struct mirror_set *ms, struct bio *bio)
{
	unsigned int i;
	struct dm_io_region io[ms->nr_mirrors], *dest = io;
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_rw = WRITE | (bio->bi_rw & WRITE_FLUSH_FUA),
		.mem.type = DM_IO_BVEC,
		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
		.notify.fn = write_callback,
		.notify.context = bio,
		.client = ms->io_client,
	};

	if (bio->bi_rw & REQ_DISCARD) {
		io_req.bi_rw |= REQ_DISCARD;
		io_req.mem.type = DM_IO_KMEM;
		io_req.mem.ptr.addr = NULL;
	}

	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++)
		map_region(dest++, m, bio);

	/*
	 * Use default mirror because we only need it to retrieve the reference
	 * to the mirror set in write_callback().
	 */
	bio_set_m(bio, get_default_mirror(ms));

	BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL));
}

static void do_writes(struct mirror_set *ms, struct bio_list *writes)
{
	int state;
	struct bio *bio;
	struct bio_list sync, nosync, recover, *this_list = NULL;
	struct bio_list requeue;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	region_t region;

	if (!writes->head)
		return;

	/*
	 * Classify each write.
	 */
	bio_list_init(&sync);
	bio_list_init(&nosync);
	bio_list_init(&recover);
	bio_list_init(&requeue);

	while ((bio = bio_list_pop(writes))) {
		if ((bio->bi_rw & REQ_FLUSH) ||
		    (bio->bi_rw & REQ_DISCARD)) {
			bio_list_add(&sync, bio);
			continue;
		}

		region = dm_rh_bio_to_region(ms->rh, bio);

		if (log->type->is_remote_recovering &&
		    log->type->is_remote_recovering(log, region)) {
			bio_list_add(&requeue, bio);
			continue;
		}

		state = dm_rh_get_state(ms->rh, region, 1);
		switch (state) {
		case DM_RH_CLEAN:
		case DM_RH_DIRTY:
			this_list = &sync;
			break;

		case DM_RH_NOSYNC:
			this_list = &nosync;
			break;

		case DM_RH_RECOVERING:
			this_list = &recover;
			break;
		}

		bio_list_add(this_list, bio);
	}

	/*
	 * Add bios that are delayed due to remote recovery
	 * back on to the write queue.
	 */
	if (unlikely(requeue.head)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->writes, &requeue);
		spin_unlock_irq(&ms->lock);
		delayed_wake(ms);
	}

	/*
	 * Increment the pending counts for any regions that will
	 * be written to (writes to recover regions are going to
	 * be delayed).
	 */
	dm_rh_inc_pending(ms->rh, &sync);
	dm_rh_inc_pending(ms->rh, &nosync);

	/*
	 * If the flush fails on a previous call and succeeds here,
	 * we must not reset the log_failure variable.  We need
	 * userspace interaction to do that.
	 */
	ms->log_failure = dm_rh_flush(ms->rh) ? 1 : ms->log_failure;

	/*
	 * Dispatch io.
	 */
	if (unlikely(ms->log_failure) && errors_handled(ms)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->failures, &sync);
		spin_unlock_irq(&ms->lock);
		wakeup_mirrord(ms);
	} else
		while ((bio = bio_list_pop(&sync)))
			do_write(ms, bio);

	while ((bio = bio_list_pop(&recover)))
		dm_rh_delay(ms->rh, bio);

	while ((bio = bio_list_pop(&nosync))) {
		if (unlikely(ms->leg_failure) && errors_handled(ms)) {
			spin_lock_irq(&ms->lock);
			bio_list_add(&ms->failures, bio);
			spin_unlock_irq(&ms->lock);
			wakeup_mirrord(ms);
		} else {
			map_bio(get_default_mirror(ms), bio);
			generic_make_request(bio);
		}
	}
}

static void do_failures(struct mirror_set *ms, struct bio_list *failures)
{
	struct bio *bio;

	if (likely(!failures->head))
		return;

	/*
	 * If the log has failed, unattempted writes are being
	 * put on the holds list.  We can't issue those writes
	 * until a log has been marked, so we must store them.
	 *
	 * If a 'noflush' suspend is in progress, we can requeue
	 * the I/O's to the core.  This gives userspace a chance
	 * to reconfigure the mirror, at which point the core
	 * will reissue the writes.  If the 'noflush' flag is
	 * not set, we have no choice but to return errors.
	 *
	 * Some writes on the failures list may have been
	 * submitted before the log failure and represent a
	 * failure to write to one of the devices.  It is ok
	 * for us to treat them the same and requeue them
	 * as well.
	 */
	while ((bio = bio_list_pop(failures))) {
		if (!ms->log_failure) {
			ms->in_sync = 0;
			dm_rh_mark_nosync(ms->rh, bio);
		}

		/*
		 * If all the legs are dead, fail the I/O.
		 * If we have been told to handle errors, hold the bio
		 * and wait for userspace to deal with the problem.
		 * Otherwise pretend that the I/O succeeded. (This would
		 * be wrong if the failed leg returned after reboot and
		 * got replicated back to the good legs.)
		 */
		if (!get_valid_mirror(ms))
			bio_endio(bio, -EIO);
		else if (errors_handled(ms))
			hold_bio(ms, bio);
		else
			bio_endio(bio, 0);
	}
}

static void trigger_event(struct work_struct *work)
{
	struct mirror_set *ms =
		container_of(work, struct mirror_set, trigger_event);

	dm_table_event(ms->ti->table);
}

/*-----------------------------------------------------------------
 * kmirrord
 *---------------------------------------------------------------*/
static void do_mirror(struct work_struct *work)
{
	struct mirror_set *ms = container_of(work, struct mirror_set,
					     kmirrord_work);
	struct bio_list reads, writes, failures;
	unsigned long flags;

	spin_lock_irqsave(&ms->lock, flags);
	reads = ms->reads;
	writes = ms->writes;
	failures = ms->failures;
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	bio_list_init(&ms->failures);
	spin_unlock_irqrestore(&ms->lock, flags);

	dm_rh_update_states(ms->rh, errors_handled(ms));
	do_recovery(ms);
	do_reads(ms, &reads);
	do_writes(ms, &writes);
	do_failures(ms, &failures);
}

/*-----------------------------------------------------------------
 * Target functions
 *---------------------------------------------------------------*/
static struct mirror_set *alloc_context(unsigned int nr_mirrors,
					uint32_t region_size,
					struct dm_target *ti,
					struct dm_dirty_log *dl)
{
	size_t len;
	struct mirror_set *ms = NULL;

	len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);

	ms = kzalloc(len, GFP_KERNEL);
	if (!ms) {
		ti->error = "Cannot allocate mirror context";
		return NULL;
	}

	spin_lock_init(&ms->lock);
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	bio_list_init(&ms->failures);
	bio_list_init(&ms->holds);

	ms->ti = ti;
	ms->nr_mirrors = nr_mirrors;
	ms->nr_regions = dm_sector_div_up(ti->len, region_size);
	ms->in_sync = 0;
	ms->log_failure = 0;
	ms->leg_failure = 0;
	atomic_set(&ms->suspend, 0);
	atomic_set(&ms->default_mirror, DEFAULT_MIRROR);

	ms->read_record_pool = mempool_create_slab_pool(MIN_READ_RECORDS,
						_dm_raid1_read_record_cache);

	if (!ms->read_record_pool) {
		ti->error = "Error creating mirror read_record_pool";
		kfree(ms);
		return NULL;
	}

	ms->io_client = dm_io_client_create();
	if (IS_ERR(ms->io_client)) {
		ti->error = "Error creating dm_io client";
		mempool_destroy(ms->read_record_pool);
		kfree(ms);
		return NULL;
	}

	ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord,
				       wakeup_all_recovery_waiters,
				       ms->ti->begin, MAX_RECOVERY,
				       dl, region_size, ms->nr_regions);
	if (IS_ERR(ms->rh)) {
		ti->error = "Error creating dirty region hash";
		dm_io_client_destroy(ms->io_client);
		mempool_destroy(ms->read_record_pool);
		kfree(ms);
		return NULL;
	}

	return ms;
}

static void free_context(struct mirror_set *ms, struct dm_target *ti,
			 unsigned int m)
{
	while (m--)
		dm_put_device(ti, ms->mirror[m].dev);

	dm_io_client_destroy(ms->io_client);
	dm_region_hash_destroy(ms->rh);
	mempool_destroy(ms->read_record_pool);
	kfree(ms);
}

static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
		      unsigned int mirror, char **argv)
{
	unsigned long long offset;

	if (sscanf(argv[1], "%llu", &offset) != 1) {
		ti->error = "Invalid offset";
		return -EINVAL;
	}

	if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
			  &ms->mirror[mirror].dev)) {
		ti->error = "Device lookup failure";
		return -ENXIO;
	}

	ms->mirror[mirror].ms = ms;
	atomic_set(&(ms->mirror[mirror].error_count), 0);
	ms->mirror[mirror].error_type = 0;
	ms->mirror[mirror].offset = offset;

	return 0;
}

/*
 * Create dirty log: log_type #log_params <log_params>
 */
static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
					     unsigned argc, char **argv,
					     unsigned *args_used)
{
	unsigned param_count;
	struct dm_dirty_log *dl;

	if (argc < 2) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	if (sscanf(argv[1], "%u", &param_count) != 1) {
		ti->error = "Invalid mirror log argument count";
		return NULL;
	}

	*args_used = 2 + param_count;

	if (argc < *args_used) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	dl = dm_dirty_log_create(argv[0], ti, mirror_flush, param_count,
				 argv + 2);
	if (!dl) {
		ti->error = "Error creating mirror dirty log";
		return NULL;
	}

	return dl;
}

static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
			  unsigned *args_used)
{
	unsigned num_features;
	struct dm_target *ti = ms->ti;

	*args_used = 0;

	if (!argc)
		return 0;

	if (sscanf(argv[0], "%u", &num_features) != 1) {
		ti->error = "Invalid number of features";
		return -EINVAL;
	}

	argc--;
	argv++;
	(*args_used)++;

	if (num_features > argc) {
		ti->error = "Not enough arguments to support feature count";
		return -EINVAL;
	}

	if (!strcmp("handle_errors", argv[0]))
		ms->features |= DM_RAID1_HANDLE_ERRORS;
	else {
		ti->error = "Unrecognised feature requested";
		return -EINVAL;
	}

	(*args_used)++;

	return 0;
}

/*
 * Construct a mirror mapping:
 *
 * log_type #log_params <log_params>
 * #mirrors [mirror_path offset]{2,}
 * [#features <features>]
 *
 * log_type is "core" or "disk"
 * #log_params is between 1 and 3
 *
 * If present, features must be "handle_errors".
 */
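/*
 * Example table line (a sketch; the device names, offsets and sizes
 * below are illustrative assumptions, not taken from this code):
 *
 *	0 2097152 mirror core 1 1024 2 /dev/sdb 0 /dev/sdc 0 1 handle_errors
 *
 * i.e. a two-leg mirror with an in-core log, 1024-sector regions and
 * the handle_errors feature enabled.
 */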
static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	unsigned int nr_mirrors, m, args_used;
	struct mirror_set *ms;
	struct dm_dirty_log *dl;

	dl = create_dirty_log(ti, argc, argv, &args_used);
	if (!dl)
		return -EINVAL;

	argv += args_used;
	argc -= args_used;

	if (!argc || sscanf(argv[0], "%u", &nr_mirrors) != 1 ||
	    nr_mirrors < 2 || nr_mirrors > DM_KCOPYD_MAX_REGIONS + 1) {
		ti->error = "Invalid number of mirrors";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	argv++, argc--;

	if (argc < nr_mirrors * 2) {
		ti->error = "Too few mirror arguments";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
	if (!ms) {
		dm_dirty_log_destroy(dl);
		return -ENOMEM;
	}

	/* Get the mirror parameter sets */
	for (m = 0; m < nr_mirrors; m++) {
		r = get_mirror(ms, ti, m, argv);
		if (r) {
			free_context(ms, ti, m);
			return r;
		}
		argv += 2;
		argc -= 2;
	}

	ti->private = ms;
	ti->split_io = dm_rh_get_region_size(ms->rh);
	ti->num_flush_requests = 1;
	ti->num_discard_requests = 1;

	ms->kmirrord_wq = alloc_workqueue("kmirrord",
					  WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
	if (!ms->kmirrord_wq) {
		DMERR("couldn't start kmirrord");
		r = -ENOMEM;
		goto err_free_context;
	}
	INIT_WORK(&ms->kmirrord_work, do_mirror);
	init_timer(&ms->timer);
	ms->timer_pending = 0;
	INIT_WORK(&ms->trigger_event, trigger_event);

	r = parse_features(ms, argc, argv, &args_used);
	if (r)
		goto err_destroy_wq;

	argv += args_used;
	argc -= args_used;

	/*
	 * Any read-balancing addition depends on the
	 * DM_RAID1_HANDLE_ERRORS flag being present.
	 * This is because the decision to balance depends
	 * on the sync state of a region.  If the above
	 * flag is not present, we ignore errors; and
	 * the sync state may be inaccurate.
	 */

	if (argc) {
		ti->error = "Too many mirror arguments";
		r = -EINVAL;
		goto err_destroy_wq;
	}

	r = dm_kcopyd_client_create(DM_KCOPYD_PAGES, &ms->kcopyd_client);
	if (r)
		goto err_destroy_wq;

	wakeup_mirrord(ms);
	return 0;

err_destroy_wq:
	destroy_workqueue(ms->kmirrord_wq);
err_free_context:
	free_context(ms, ti, ms->nr_mirrors);
	return r;
}

static void mirror_dtr(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	del_timer_sync(&ms->timer);
	flush_workqueue(ms->kmirrord_wq);
	flush_work_sync(&ms->trigger_event);
	dm_kcopyd_client_destroy(ms->kcopyd_client);
	destroy_workqueue(ms->kmirrord_wq);
	free_context(ms, ti, ms->nr_mirrors);
}

/*
 * Mirror mapping function
 */
static int mirror_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	int r, rw = bio_rw(bio);
	struct mirror *m;
	struct mirror_set *ms = ti->private;
	struct dm_raid1_read_record *read_record = NULL;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	if (rw == WRITE) {
		/* Save region for mirror_end_io() handler */
		map_context->ll = dm_rh_bio_to_region(ms->rh, bio);
		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0);
	if (r < 0 && r != -EWOULDBLOCK)
		return r;

	/*
	 * If the region is not in-sync, queue the bio.
	 */
	if (!r || (r == -EWOULDBLOCK)) {
		if (rw == READA)
			return -EWOULDBLOCK;

		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	/*
	 * The region is in-sync and we can perform reads directly.
	 * Store enough information so we can retry if it fails.
	 */
	m = choose_mirror(ms, bio->bi_sector);
	if (unlikely(!m))
		return -EIO;

	read_record = mempool_alloc(ms->read_record_pool, GFP_NOIO);
	if (likely(read_record)) {
		dm_bio_record(&read_record->details, bio);
		map_context->ptr = read_record;
		read_record->m = m;
	}

	map_bio(m, bio);

	return DM_MAPIO_REMAPPED;
}

static int mirror_end_io(struct dm_target *ti, struct bio *bio,
			 int error, union map_info *map_context)
{
	int rw = bio_rw(bio);
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct mirror *m = NULL;
	struct dm_bio_details *bd = NULL;
	struct dm_raid1_read_record *read_record = map_context->ptr;

	/*
	 * We need to dec pending if this was a write.
	 */
	if (rw == WRITE) {
		if (!(bio->bi_rw & REQ_FLUSH))
			dm_rh_dec(ms->rh, map_context->ll);
		return error;
	}

	if (error == -EOPNOTSUPP)
		goto out;

	if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD))
		goto out;

	if (unlikely(error)) {
		if (!read_record) {
			/*
			 * There wasn't enough memory to record necessary
			 * information for a retry or there was no other
			 * mirror in-sync.
			 */
			DMERR_LIMIT("Mirror read failed.");
			return -EIO;
		}

		m = read_record->m;

		DMERR("Mirror read failed from %s. Trying alternative device.",
		      m->dev->name);

		fail_mirror(m, DM_RAID1_READ_ERROR);

		/*
		 * A failed read is requeued for another attempt using an
		 * intact mirror.
		 */
		if (default_ok(m) || mirror_available(ms, bio)) {
			bd = &read_record->details;

			dm_bio_restore(bd, bio);
			mempool_free(read_record, ms->read_record_pool);
			map_context->ptr = NULL;
			queue_bio(ms, bio, rw);
			return 1;
		}
		DMERR("All replicated volumes dead, failing I/O");
	}

out:
	if (read_record) {
		mempool_free(read_record, ms->read_record_pool);
		map_context->ptr = NULL;
	}

	return error;
}

static void mirror_presuspend(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	struct bio_list holds;
	struct bio *bio;

	atomic_set(&ms->suspend, 1);

	/*
	 * Process bios in the hold list first, so that recovery, which
	 * may be waiting on them, can proceed.  After this point, no bio
	 * can be added to the hold list because ms->suspend is set.
	 */
	spin_lock_irq(&ms->lock);
	holds = ms->holds;
	bio_list_init(&ms->holds);
	spin_unlock_irq(&ms->lock);

	while ((bio = bio_list_pop(&holds)))
		hold_bio(ms, bio);

	/*
	 * We must finish up all the work that we've
	 * generated (i.e. recovery work).
	 */
	dm_rh_stop_recovery(ms->rh);

	wait_event(_kmirrord_recovery_stopped,
		   !dm_rh_recovery_in_flight(ms->rh));

	if (log->type->presuspend && log->type->presuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log presuspend failed");

	/*
	 * Now that recovery is complete/stopped and the
	 * delayed bios are queued, we need to wait for
	 * the worker thread to complete. This way,
	 * we know that all of our I/O has been pushed.
	 */
	flush_workqueue(ms->kmirrord_wq);
}

static void mirror_postsuspend(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	if (log->type->postsuspend && log->type->postsuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log postsuspend failed");
}

static void mirror_resume(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	atomic_set(&ms->suspend, 0);
	if (log->type->resume && log->type->resume(log))
		/* FIXME: need better error handling */
		DMWARN("log resume failed");
	dm_rh_start_recovery(ms->rh);
}

/*
 * device_status_char
 * @m: mirror device/leg we want the status of
 *
 * We return one character representing the most severe error
 * we have encountered.
 *	A => Alive - No failures
 *	D => Dead - A write failure occurred leaving mirror out-of-sync
 *	S => Sync - A synchronization failure occurred, mirror out-of-sync
 *	R => Read - A read failure occurred, mirror data unaffected
 *
 * Returns: <char>
 */
static char device_status_char(struct mirror *m)
{
	if (!atomic_read(&(m->error_count)))
		return 'A';

	return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
		(test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
		(test_bit(DM_RAID1_SYNC_ERROR, &(m->error_type))) ? 'S' :
		(test_bit(DM_RAID1_READ_ERROR, &(m->error_type))) ? 'R' : 'U';
}
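
/*
 * Sketch of the STATUSTYPE_INFO output assembled below (the values are
 * illustrative assumptions): "2 253:4 253:5 124/1024 1 AA" followed by
 * the dirty log's own status string, for a two-leg mirror with 124 of
 * 1024 regions in sync and both legs alive.
 */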
static int mirror_status(struct dm_target *ti, status_type_t type,
			 char *result, unsigned int maxlen)
{
	unsigned int m, sz = 0;
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	char buffer[ms->nr_mirrors + 1];

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%d ", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++) {
			DMEMIT("%s ", ms->mirror[m].dev->name);
			buffer[m] = device_status_char(&(ms->mirror[m]));
		}
		buffer[m] = '\0';

		DMEMIT("%llu/%llu 1 %s ",
		       (unsigned long long)log->type->get_sync_count(log),
		       (unsigned long long)ms->nr_regions, buffer);

		sz += log->type->status(log, type, result+sz, maxlen-sz);

		break;

	case STATUSTYPE_TABLE:
		sz = log->type->status(log, type, result, maxlen);

		DMEMIT("%d", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT(" %s %llu", ms->mirror[m].dev->name,
			       (unsigned long long)ms->mirror[m].offset);

		if (ms->features & DM_RAID1_HANDLE_ERRORS)
			DMEMIT(" 1 handle_errors");
	}

	return 0;
}

static int mirror_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct mirror_set *ms = ti->private;
	int ret = 0;
	unsigned i;

	for (i = 0; !ret && i < ms->nr_mirrors; i++)
		ret = fn(ti, ms->mirror[i].dev,
			 ms->mirror[i].offset, ti->len, data);

	return ret;
}

static struct target_type mirror_target = {
	.name	 = "mirror",
	.version = {1, 12, 1},
	.module	 = THIS_MODULE,
	.ctr	 = mirror_ctr,
	.dtr	 = mirror_dtr,
	.map	 = mirror_map,
	.end_io	 = mirror_end_io,
	.presuspend = mirror_presuspend,
	.postsuspend = mirror_postsuspend,
	.resume	 = mirror_resume,
	.status	 = mirror_status,
	.iterate_devices = mirror_iterate_devices,
};

static int __init dm_mirror_init(void)
{
	int r;

	_dm_raid1_read_record_cache = KMEM_CACHE(dm_raid1_read_record, 0);
	if (!_dm_raid1_read_record_cache) {
		DMERR("Can't allocate dm_raid1_read_record cache");
		r = -ENOMEM;
		goto bad_cache;
	}

	r = dm_register_target(&mirror_target);
	if (r < 0) {
		DMERR("Failed to register mirror target");
		goto bad_target;
	}

	return 0;

bad_target:
	kmem_cache_destroy(_dm_raid1_read_record_cache);
bad_cache:
	return r;
}

static void __exit dm_mirror_exit(void)
{
	dm_unregister_target(&mirror_target);
	kmem_cache_destroy(_dm_raid1_read_record_cache);
}

/* Module hooks */
module_init(dm_mirror_init);
module_exit(dm_mirror_exit);

MODULE_DESCRIPTION(DM_NAME " mirror target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");