/*
 * Copyright (C) 2003 Sistina Software Limited.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-list.h"
#include "dm-io.h"
#include "dm-log.h"
#include "kcopyd.h"

#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

static struct workqueue_struct *_kmirrord_wq;
static struct work_struct _kmirrord_work;

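/*
 * Schedule the kmirrord daemon; all deferred mirror work (region
 * state updates, recovery, queued reads and writes) happens there.
 */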
static inline void wake(void)
{
	queue_work(_kmirrord_wq, &_kmirrord_work);
}

/*-----------------------------------------------------------------
 * Region hash
 *
 * The mirror splits itself up into discrete regions.  Each
 * region can be in one of three states: clean, dirty,
 * nosync.  There is no need to put clean regions in the hash.
 *
 * In addition to being present in the hash table a region _may_
 * be present on one of three lists.
 *
 *   clean_regions: Regions on this list have no io pending to
 *   them, they are in sync, we are no longer interested in them,
 *   they are dull.  rh_update_states() will remove them from the
 *   hash table.
 *
 *   quiesced_regions: These regions have been spun down, ready
 *   for recovery.  rh_recovery_start() will remove regions from
 *   this list and hand them to kmirrord, which will schedule the
 *   recovery io with kcopyd.
 *
 *   recovered_regions: Regions that kcopyd has successfully
 *   recovered.  rh_update_states() will now schedule any delayed
 *   io, up the recovery_count, and remove the region from the
 *   hash.
 *
 * There are 2 locks:
 *   A rw spin lock 'hash_lock' protects just the hash table,
 *   this is never held in write mode from interrupt context,
 *   which I believe means that we only have to disable irqs when
 *   doing a write lock.
 *
 *   An ordinary spin lock 'region_lock' that protects the three
 *   lists in the region_hash, with the 'state', 'list' and
 *   'bhs_delayed' fields of the regions.  This is used from irq
 *   context, so all other uses will have to suspend local irqs.
 *---------------------------------------------------------------*/
struct mirror_set;
struct region_hash {
	struct mirror_set *ms;
	uint32_t region_size;
	unsigned region_shift;

	/* holds persistent region state */
	struct dirty_log *log;

	/* hash table */
	rwlock_t hash_lock;
	mempool_t *region_pool;
	unsigned int mask;
	unsigned int nr_buckets;
	struct list_head *buckets;

	spinlock_t region_lock;
	struct semaphore recovery_count;
	struct list_head clean_regions;
	struct list_head quiesced_regions;
	struct list_head recovered_regions;
};

enum {
	RH_CLEAN,
	RH_DIRTY,
	RH_NOSYNC,
	RH_RECOVERING
};

struct region {
	struct region_hash *rh;	/* FIXME: can we get rid of this ? */
	region_t key;
	int state;

	struct list_head hash_list;
	struct list_head list;

	atomic_t pending;
	struct bio_list delayed_bios;
};

/*
 * Conversion fns
 */
static inline region_t bio_to_region(struct region_hash *rh, struct bio *bio)
{
	return bio->bi_sector >> rh->region_shift;
}

static inline sector_t region_to_sector(struct region_hash *rh, region_t region)
{
	return region << rh->region_shift;
}

/* FIXME move this */
static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw);

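/*
 * Allocation/free callbacks for the region mempool.
 */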
static void *region_alloc(gfp_t gfp_mask, void *pool_data)
{
	return kmalloc(sizeof(struct region), gfp_mask);
}

static void region_free(void *element, void *pool_data)
{
	kfree(element);
}

#define MIN_REGIONS 64
#define MAX_RECOVERY 1
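/*
 * Initialise a region hash: size the bucket array from the number
 * of regions, then set up the locks, lists and region mempool.
 */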
static int rh_init(struct region_hash *rh, struct mirror_set *ms,
		   struct dirty_log *log, uint32_t region_size,
		   region_t nr_regions)
{
	unsigned int nr_buckets, max_buckets;
	size_t i;

	/*
	 * Calculate a suitable number of buckets for our hash
	 * table.
	 */
	max_buckets = nr_regions >> 6;
	for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1)
		;
	nr_buckets >>= 1;

	rh->ms = ms;
	rh->log = log;
	rh->region_size = region_size;
	rh->region_shift = ffs(region_size) - 1;
	rwlock_init(&rh->hash_lock);
	rh->mask = nr_buckets - 1;
	rh->nr_buckets = nr_buckets;

	rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets));
	if (!rh->buckets) {
		DMERR("unable to allocate region hash memory");
		return -ENOMEM;
	}

	for (i = 0; i < nr_buckets; i++)
		INIT_LIST_HEAD(rh->buckets + i);

	spin_lock_init(&rh->region_lock);
	sema_init(&rh->recovery_count, 0);
	INIT_LIST_HEAD(&rh->clean_regions);
	INIT_LIST_HEAD(&rh->quiesced_regions);
	INIT_LIST_HEAD(&rh->recovered_regions);

	rh->region_pool = mempool_create(MIN_REGIONS, region_alloc,
					 region_free, NULL);
	if (!rh->region_pool) {
		vfree(rh->buckets);
		rh->buckets = NULL;
		return -ENOMEM;
	}

	return 0;
}

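/*
 * Tear down a region hash, freeing any regions still hashed, the
 * dirty log, the region mempool and the bucket array.
 */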
static void rh_exit(struct region_hash *rh)
{
	unsigned int h;
	struct region *reg, *nreg;

	BUG_ON(!list_empty(&rh->quiesced_regions));
	for (h = 0; h < rh->nr_buckets; h++) {
		list_for_each_entry_safe(reg, nreg, rh->buckets + h, hash_list) {
			BUG_ON(atomic_read(&reg->pending));
			mempool_free(reg, rh->region_pool);
		}
	}

	if (rh->log)
		dm_destroy_dirty_log(rh->log);
	if (rh->region_pool)
		mempool_destroy(rh->region_pool);
	vfree(rh->buckets);
}

#define RH_HASH_MULT 2654435387U

static inline unsigned int rh_hash(struct region_hash *rh, region_t region)
{
	return (unsigned int) ((region * RH_HASH_MULT) >> 12) & rh->mask;
}

static struct region *__rh_lookup(struct region_hash *rh, region_t region)
{
	struct region *reg;

	list_for_each_entry (reg, rh->buckets + rh_hash(rh, region), hash_list)
		if (reg->key == region)
			return reg;

	return NULL;
}

static void __rh_insert(struct region_hash *rh, struct region *reg)
{
	unsigned int h = rh_hash(rh, reg->key);
	list_add(&reg->hash_list, rh->buckets + h);
}

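/*
 * Allocate and insert a new region.  Called with the read lock on
 * hash_lock held; the lock is dropped around the mempool allocation
 * and retaken in write mode to insert, so a racing inserter may win.
 */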
static struct region *__rh_alloc(struct region_hash *rh, region_t region)
{
	struct region *reg, *nreg;

	read_unlock(&rh->hash_lock);
	nreg = mempool_alloc(rh->region_pool, GFP_NOIO);
	nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
		RH_CLEAN : RH_NOSYNC;
	nreg->rh = rh;
	nreg->key = region;

	INIT_LIST_HEAD(&nreg->list);

	atomic_set(&nreg->pending, 0);
	bio_list_init(&nreg->delayed_bios);
	write_lock_irq(&rh->hash_lock);

	reg = __rh_lookup(rh, region);
	if (reg)
		/* we lost the race */
		mempool_free(nreg, rh->region_pool);

	else {
		__rh_insert(rh, nreg);
		if (nreg->state == RH_CLEAN) {
			spin_lock(&rh->region_lock);
			list_add(&nreg->list, &rh->clean_regions);
			spin_unlock(&rh->region_lock);
		}
		reg = nreg;
	}
	write_unlock_irq(&rh->hash_lock);
	read_lock(&rh->hash_lock);

	return reg;
}

static inline struct region *__rh_find(struct region_hash *rh, region_t region)
{
	struct region *reg;

	reg = __rh_lookup(rh, region);
	if (!reg)
		reg = __rh_alloc(rh, region);

	return reg;
}

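/*
 * Return the state of a region, falling back to the dirty log when
 * the region isn't in the hash.
 */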
static int rh_state(struct region_hash *rh, region_t region, int may_block)
{
	int r;
	struct region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	if (reg)
		return reg->state;

	/*
	 * The region wasn't in the hash, so we fall back to the
	 * dirty log.
	 */
	r = rh->log->type->in_sync(rh->log, region, may_block);

	/*
	 * Any error from the dirty log (eg. -EWOULDBLOCK) gets
	 * taken as a RH_NOSYNC
	 */
	return r == 1 ? RH_CLEAN : RH_NOSYNC;
}

static inline int rh_in_sync(struct region_hash *rh,
			     region_t region, int may_block)
{
	int state = rh_state(rh, region, may_block);
	return state == RH_CLEAN || state == RH_DIRTY;
}

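/* Requeue a list of delayed write bios for the daemon to process. */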
static void dispatch_bios(struct mirror_set *ms, struct bio_list *bio_list)
{
	struct bio *bio;

	while ((bio = bio_list_pop(bio_list))) {
		queue_bio(ms, bio, WRITE);
	}
}

static void rh_update_states(struct region_hash *rh)
{
	struct region *reg, *next;

	LIST_HEAD(clean);
	LIST_HEAD(recovered);

	/*
	 * Quickly grab the lists.
	 */
	write_lock_irq(&rh->hash_lock);
	spin_lock(&rh->region_lock);
	if (!list_empty(&rh->clean_regions)) {
		list_splice(&rh->clean_regions, &clean);
		INIT_LIST_HEAD(&rh->clean_regions);

		list_for_each_entry (reg, &clean, list) {
			rh->log->type->clear_region(rh->log, reg->key);
			list_del(&reg->hash_list);
		}
	}

	if (!list_empty(&rh->recovered_regions)) {
		list_splice(&rh->recovered_regions, &recovered);
		INIT_LIST_HEAD(&rh->recovered_regions);

		list_for_each_entry (reg, &recovered, list)
			list_del(&reg->hash_list);
	}
	spin_unlock(&rh->region_lock);
	write_unlock_irq(&rh->hash_lock);

	/*
	 * All the regions on the recovered and clean lists have
	 * now been pulled out of the system, so no need to do
	 * any more locking.
	 */
	list_for_each_entry_safe (reg, next, &recovered, list) {
		rh->log->type->clear_region(rh->log, reg->key);
		rh->log->type->complete_resync_work(rh->log, reg->key, 1);
		dispatch_bios(rh->ms, &reg->delayed_bios);
		up(&rh->recovery_count);
		mempool_free(reg, rh->region_pool);
	}

	if (!list_empty(&recovered))
		rh->log->type->flush(rh->log);

	list_for_each_entry_safe (reg, next, &clean, list)
		mempool_free(reg, rh->region_pool);
}

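/*
 * Account a pending write to a region, marking it dirty in the log
 * on the clean -> dirty transition.
 */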
static void rh_inc(struct region_hash *rh, region_t region)
{
	struct region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);

	spin_lock_irq(&rh->region_lock);
	atomic_inc(&reg->pending);

	if (reg->state == RH_CLEAN) {
		reg->state = RH_DIRTY;
		list_del_init(&reg->list);	/* take off the clean list */
		spin_unlock_irq(&rh->region_lock);

		rh->log->type->mark_region(rh->log, reg->key);
	} else
		spin_unlock_irq(&rh->region_lock);

	read_unlock(&rh->hash_lock);
}

static void rh_inc_pending(struct region_hash *rh, struct bio_list *bios)
{
	struct bio *bio;

	for (bio = bios->head; bio; bio = bio->bi_next)
		rh_inc(rh, bio_to_region(rh, bio));
}

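/*
 * Drop a region's pending count; when the last writer finishes the
 * region moves to the quiesced or clean list and kmirrord is woken.
 */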
static void rh_dec(struct region_hash *rh, region_t region)
{
	unsigned long flags;
	struct region *reg;
	int should_wake = 0;

	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irqsave(&rh->region_lock, flags);
	if (atomic_dec_and_test(&reg->pending)) {
		if (reg->state == RH_RECOVERING) {
			list_add_tail(&reg->list, &rh->quiesced_regions);
		} else {
			reg->state = RH_CLEAN;
			list_add(&reg->list, &rh->clean_regions);
		}
		should_wake = 1;
	}
	spin_unlock_irqrestore(&rh->region_lock, flags);

	if (should_wake)
		wake();
}

/*
 * Starts quiescing a region in preparation for recovery.
 */
static int __rh_recovery_prepare(struct region_hash *rh)
{
	int r;
	struct region *reg;
	region_t region;

	/*
	 * Ask the dirty log what's next.
	 */
	r = rh->log->type->get_resync_work(rh->log, &region);
	if (r <= 0)
		return r;

	/*
	 * Get this region, and start it quiescing by setting the
	 * recovering flag.
	 */
	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irq(&rh->region_lock);
	reg->state = RH_RECOVERING;

	/* Already quiesced ? */
	if (atomic_read(&reg->pending))
		list_del_init(&reg->list);

	else {
		list_del_init(&reg->list);
		list_add(&reg->list, &rh->quiesced_regions);
	}
	spin_unlock_irq(&rh->region_lock);

	return 1;
}

static void rh_recovery_prepare(struct region_hash *rh)
{
	while (!down_trylock(&rh->recovery_count))
		if (__rh_recovery_prepare(rh) <= 0) {
			up(&rh->recovery_count);
			break;
		}
}

/*
 * Returns any quiesced regions.
 */
static struct region *rh_recovery_start(struct region_hash *rh)
{
	struct region *reg = NULL;

	spin_lock_irq(&rh->region_lock);
	if (!list_empty(&rh->quiesced_regions)) {
		reg = list_entry(rh->quiesced_regions.next,
				 struct region, list);
		list_del_init(&reg->list);	/* remove from the quiesced list */
	}
	spin_unlock_irq(&rh->region_lock);

	return reg;
}

/* FIXME: success ignored for now */
static void rh_recovery_end(struct region *reg, int success)
{
	struct region_hash *rh = reg->rh;

	spin_lock_irq(&rh->region_lock);
	list_add(&reg->list, &reg->rh->recovered_regions);
	spin_unlock_irq(&rh->region_lock);

	wake();
}

static void rh_flush(struct region_hash *rh)
{
	rh->log->type->flush(rh->log);
}

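/* Park a bio on its region's delayed list until recovery finishes. */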
static void rh_delay(struct region_hash *rh, struct bio *bio)
{
	struct region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, bio_to_region(rh, bio));
	bio_list_add(&reg->delayed_bios, bio);
	read_unlock(&rh->hash_lock);
}

static void rh_stop_recovery(struct region_hash *rh)
{
	int i;

	/* wait for any recovering regions */
	for (i = 0; i < MAX_RECOVERY; i++)
		down(&rh->recovery_count);
}

static void rh_start_recovery(struct region_hash *rh)
{
	int i;

	for (i = 0; i < MAX_RECOVERY; i++)
		up(&rh->recovery_count);

	wake();
}

/*-----------------------------------------------------------------
 * Mirror set structures.
 *---------------------------------------------------------------*/
struct mirror {
	atomic_t error_count;
	struct dm_dev *dev;
	sector_t offset;
};

struct mirror_set {
	struct dm_target *ti;
	struct list_head list;
	struct region_hash rh;
	struct kcopyd_client *kcopyd_client;

	spinlock_t lock;	/* protects the next two lists */
	struct bio_list reads;
	struct bio_list writes;

	/* recovery */
	region_t nr_regions;
	int in_sync;

	struct mirror *default_mirror;	/* Default mirror */

	unsigned int nr_mirrors;
	struct mirror mirror[0];
};

/*
 * Every mirror should look like this one.
 */
#define DEFAULT_MIRROR 0

/*
 * This is yucky.  We squirrel the mirror_set struct away inside
 * bi_next for write buffers.  This is safe since the bh
 * doesn't get submitted to the lower levels of block layer.
 */
static struct mirror_set *bio_get_ms(struct bio *bio)
{
	return (struct mirror_set *) bio->bi_next;
}

static void bio_set_ms(struct bio *bio, struct mirror_set *ms)
{
	bio->bi_next = (struct bio *) ms;
}

/*-----------------------------------------------------------------
 * Recovery.
 *
 * When a mirror is first activated we may find that some regions
 * are in the no-sync state.  We have to recover these by
 * recopying from the default mirror to all the others.
 *---------------------------------------------------------------*/
static void recovery_complete(int read_err, unsigned int write_err,
			      void *context)
{
	struct region *reg = (struct region *) context;

	/* FIXME: better error handling */
	rh_recovery_end(reg, read_err || write_err);
}

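/*
 * Kick off a kcopyd copy of one region from the default mirror to
 * every other mirror; recovery_complete() runs when it finishes.
 */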
static int recover(struct mirror_set *ms, struct region *reg)
{
	int r;
	unsigned int i;
	struct io_region from, to[KCOPYD_MAX_REGIONS], *dest;
	struct mirror *m;
	unsigned long flags = 0;

	/* fill in the source */
	m = ms->default_mirror;
	from.bdev = m->dev->bdev;
	from.sector = m->offset + region_to_sector(reg->rh, reg->key);
	if (reg->key == (ms->nr_regions - 1)) {
		/*
		 * The final region may be smaller than
		 * region_size.
		 */
		from.count = ms->ti->len & (reg->rh->region_size - 1);
		if (!from.count)
			from.count = reg->rh->region_size;
	} else
		from.count = reg->rh->region_size;

	/* fill in the destinations */
	for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
		if (&ms->mirror[i] == ms->default_mirror)
			continue;

		m = ms->mirror + i;
		dest->bdev = m->dev->bdev;
		dest->sector = m->offset + region_to_sector(reg->rh, reg->key);
		dest->count = from.count;
		dest++;
	}

	/* hand to kcopyd */
	set_bit(KCOPYD_IGNORE_ERROR, &flags);
	r = kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to, flags,
			recovery_complete, reg);

	return r;
}

static void do_recovery(struct mirror_set *ms)
{
	int r;
	struct region *reg;
	struct dirty_log *log = ms->rh.log;

	/*
	 * Start quiescing some regions.
	 */
	rh_recovery_prepare(&ms->rh);

	/*
	 * Copy any already quiesced regions.
	 */
	while ((reg = rh_recovery_start(&ms->rh))) {
		r = recover(ms, reg);
		if (r)
			rh_recovery_end(reg, 0);
	}

	/*
	 * Update the in sync flag.
	 */
	if (!ms->in_sync &&
	    (log->type->get_sync_count(log) == ms->nr_regions)) {
		/* the sync is complete */
		dm_table_event(ms->ti->table);
		ms->in_sync = 1;
	}
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
{
	/* FIXME: add read balancing */
	return ms->default_mirror;
}

/*
 * remap a buffer to a particular mirror.
 */
static void map_bio(struct mirror_set *ms, struct mirror *m, struct bio *bio)
{
	bio->bi_bdev = m->dev->bdev;
	bio->bi_sector = m->offset + (bio->bi_sector - ms->ti->begin);
}

static void do_reads(struct mirror_set *ms, struct bio_list *reads)
{
	region_t region;
	struct bio *bio;
	struct mirror *m;

	while ((bio = bio_list_pop(reads))) {
		region = bio_to_region(&ms->rh, bio);

		/*
		 * We can only read balance if the region is in sync.
		 */
		if (rh_in_sync(&ms->rh, region, 0))
			m = choose_mirror(ms, bio->bi_sector);
		else
			m = ms->default_mirror;

		map_bio(ms, m, bio);
		generic_make_request(bio);
	}
}

/*-----------------------------------------------------------------
 * Writes.
 *
 * We do different things with the write io depending on the
 * state of the region that it's in:
 *
 * SYNC:	increment pending, use kcopyd to write to *all* mirrors
 * RECOVERING:	delay the io until recovery completes
 * NOSYNC:	increment pending, just write to the default mirror
 *---------------------------------------------------------------*/
static void write_callback(unsigned long error, void *context)
{
	unsigned int i;
	int uptodate = 1;
	struct bio *bio = (struct bio *) context;
	struct mirror_set *ms;

	ms = bio_get_ms(bio);
	bio_set_ms(bio, NULL);

	/*
	 * NOTE: We don't decrement the pending count here,
	 * instead it is done by the targets endio function.
	 * This way we handle both writes to SYNC and NOSYNC
	 * regions with the same code.
	 */

	if (error) {
		/*
		 * only error the io if all mirrors failed.
		 * FIXME: bogus
		 */
		uptodate = 0;
		for (i = 0; i < ms->nr_mirrors; i++)
			if (!test_bit(i, &error)) {
				uptodate = 1;
				break;
			}
	}
	bio_endio(bio, bio->bi_size, 0);
}

static void do_write(struct mirror_set *ms, struct bio *bio)
{
	unsigned int i;
	struct io_region io[KCOPYD_MAX_REGIONS+1];
	struct mirror *m;

	for (i = 0; i < ms->nr_mirrors; i++) {
		m = ms->mirror + i;

		io[i].bdev = m->dev->bdev;
		io[i].sector = m->offset + (bio->bi_sector - ms->ti->begin);
		io[i].count = bio->bi_size >> 9;
	}

	bio_set_ms(bio, ms);
	dm_io_async_bvec(ms->nr_mirrors, io, WRITE,
			 bio->bi_io_vec + bio->bi_idx,
			 write_callback, bio);
}

static void do_writes(struct mirror_set *ms, struct bio_list *writes)
{
	int state;
	struct bio *bio;
	struct bio_list sync, nosync, recover, *this_list = NULL;

	if (!writes->head)
		return;

	/*
	 * Classify each write.
	 */
	bio_list_init(&sync);
	bio_list_init(&nosync);
	bio_list_init(&recover);

	while ((bio = bio_list_pop(writes))) {
		state = rh_state(&ms->rh, bio_to_region(&ms->rh, bio), 1);
		switch (state) {
		case RH_CLEAN:
		case RH_DIRTY:
			this_list = &sync;
			break;

		case RH_NOSYNC:
			this_list = &nosync;
			break;

		case RH_RECOVERING:
			this_list = &recover;
			break;
		}

		bio_list_add(this_list, bio);
	}

	/*
	 * Increment the pending counts for any regions that will
	 * be written to (writes to recover regions are going to
	 * be delayed).
	 */
	rh_inc_pending(&ms->rh, &sync);
	rh_inc_pending(&ms->rh, &nosync);
	rh_flush(&ms->rh);

	/*
	 * Dispatch io.
	 */
	while ((bio = bio_list_pop(&sync)))
		do_write(ms, bio);

	while ((bio = bio_list_pop(&recover)))
		rh_delay(&ms->rh, bio);

	while ((bio = bio_list_pop(&nosync))) {
		map_bio(ms, ms->default_mirror, bio);
		generic_make_request(bio);
	}
}

/*-----------------------------------------------------------------
 * kmirrord
 *---------------------------------------------------------------*/
static LIST_HEAD(_mirror_sets);
static DECLARE_RWSEM(_mirror_sets_lock);

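/*
 * One pass of the daemon for a single mirror set: take the queued
 * bios, update region states, then drive recovery, reads and writes.
 */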
static void do_mirror(struct mirror_set *ms)
{
	struct bio_list reads, writes;

	spin_lock(&ms->lock);
	reads = ms->reads;
	writes = ms->writes;
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	spin_unlock(&ms->lock);

	rh_update_states(&ms->rh);
	do_recovery(ms);
	do_reads(ms, &reads);
	do_writes(ms, &writes);
}

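/* Workqueue entry point: service every registered mirror set. */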
static void do_work(void *ignored)
{
	struct mirror_set *ms;

	down_read(&_mirror_sets_lock);
	list_for_each_entry (ms, &_mirror_sets, list)
		do_mirror(ms);
	up_read(&_mirror_sets_lock);
}

/*-----------------------------------------------------------------
 * Target functions
 *---------------------------------------------------------------*/
static struct mirror_set *alloc_context(unsigned int nr_mirrors,
					uint32_t region_size,
					struct dm_target *ti,
					struct dirty_log *dl)
{
	size_t len;
	struct mirror_set *ms = NULL;

	if (array_too_big(sizeof(*ms), sizeof(ms->mirror[0]), nr_mirrors))
		return NULL;

	len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);

	ms = kmalloc(len, GFP_KERNEL);
	if (!ms) {
		ti->error = "dm-mirror: Cannot allocate mirror context";
		return NULL;
	}

	memset(ms, 0, len);
	spin_lock_init(&ms->lock);

	ms->ti = ti;
	ms->nr_mirrors = nr_mirrors;
	ms->nr_regions = dm_sector_div_up(ti->len, region_size);
	ms->in_sync = 0;
	ms->default_mirror = &ms->mirror[DEFAULT_MIRROR];

	if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) {
		ti->error = "dm-mirror: Error creating dirty region hash";
		kfree(ms);
		return NULL;
	}

	return ms;
}

static void free_context(struct mirror_set *ms, struct dm_target *ti,
			 unsigned int m)
{
	while (m--)
		dm_put_device(ti, ms->mirror[m].dev);

	rh_exit(&ms->rh);
	kfree(ms);
}

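/*
 * A region size is valid if it is a power of two, a multiple of the
 * page size (in sectors) and no larger than the target.
 */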
static inline int _check_region_size(struct dm_target *ti, uint32_t size)
{
	return !(size % (PAGE_SIZE >> 9) || (size & (size - 1)) ||
		 size > ti->len);
}

static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
		      unsigned int mirror, char **argv)
{
	sector_t offset;

	if (sscanf(argv[1], SECTOR_FORMAT, &offset) != 1) {
		ti->error = "dm-mirror: Invalid offset";
		return -EINVAL;
	}

	if (dm_get_device(ti, argv[0], offset, ti->len,
			  dm_table_get_mode(ti->table),
			  &ms->mirror[mirror].dev)) {
		ti->error = "dm-mirror: Device lookup failure";
		return -ENXIO;
	}

	ms->mirror[mirror].offset = offset;

	return 0;
}

static int add_mirror_set(struct mirror_set *ms)
{
	down_write(&_mirror_sets_lock);
	list_add_tail(&ms->list, &_mirror_sets);
	up_write(&_mirror_sets_lock);
	wake();

	return 0;
}

static void del_mirror_set(struct mirror_set *ms)
{
	down_write(&_mirror_sets_lock);
	list_del(&ms->list);
	up_write(&_mirror_sets_lock);
}

/*
 * Create dirty log: log_type #log_params <log_params>
 */
static struct dirty_log *create_dirty_log(struct dm_target *ti,
					  unsigned int argc, char **argv,
					  unsigned int *args_used)
{
	unsigned int param_count;
	struct dirty_log *dl;

	if (argc < 2) {
		ti->error = "dm-mirror: Insufficient mirror log arguments";
		return NULL;
	}

	if (sscanf(argv[1], "%u", &param_count) != 1) {
		ti->error = "dm-mirror: Invalid mirror log argument count";
		return NULL;
	}

	*args_used = 2 + param_count;

	if (argc < *args_used) {
		ti->error = "dm-mirror: Insufficient mirror log arguments";
		return NULL;
	}

	dl = dm_create_dirty_log(argv[0], ti, param_count, argv + 2);
	if (!dl) {
		ti->error = "dm-mirror: Error creating mirror dirty log";
		return NULL;
	}

	if (!_check_region_size(ti, dl->type->get_region_size(dl))) {
		ti->error = "dm-mirror: Invalid region size";
		dm_destroy_dirty_log(dl);
		return NULL;
	}

	return dl;
}

/*
 * Construct a mirror mapping:
 *
 * log_type #log_params <log_params>
 * #mirrors [mirror_path offset]{2,}
 *
 * log_type is "core" or "disk"
 * #log_params is between 1 and 3
 */
#define DM_IO_PAGES 64
static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	unsigned int nr_mirrors, m, args_used;
	struct mirror_set *ms;
	struct dirty_log *dl;

	dl = create_dirty_log(ti, argc, argv, &args_used);
	if (!dl)
		return -EINVAL;

	argv += args_used;
	argc -= args_used;

	if (!argc || sscanf(argv[0], "%u", &nr_mirrors) != 1 ||
	    nr_mirrors < 2 || nr_mirrors > KCOPYD_MAX_REGIONS + 1) {
		ti->error = "dm-mirror: Invalid number of mirrors";
		dm_destroy_dirty_log(dl);
		return -EINVAL;
	}

	argv++, argc--;

	if (argc != nr_mirrors * 2) {
		ti->error = "dm-mirror: Wrong number of mirror arguments";
		dm_destroy_dirty_log(dl);
		return -EINVAL;
	}

	ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
	if (!ms) {
		dm_destroy_dirty_log(dl);
		return -ENOMEM;
	}

	/* Get the mirror parameter sets */
	for (m = 0; m < nr_mirrors; m++) {
		r = get_mirror(ms, ti, m, argv);
		if (r) {
			free_context(ms, ti, m);
			return r;
		}
		argv += 2;
		argc -= 2;
	}

	ti->private = ms;
	ti->split_io = ms->rh.region_size;

	r = kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client);
	if (r) {
		free_context(ms, ti, ms->nr_mirrors);
		return r;
	}

	add_mirror_set(ms);
	return 0;
}

static void mirror_dtr(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	del_mirror_set(ms);
	kcopyd_client_destroy(ms->kcopyd_client);
	free_context(ms, ti, ms->nr_mirrors);
}

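/*
 * Add a bio to the appropriate read/write list and wake kmirrord
 * if the list was previously empty.
 */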
static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
{
	int should_wake = 0;
	struct bio_list *bl;

	bl = (rw == WRITE) ? &ms->writes : &ms->reads;
	spin_lock(&ms->lock);
	should_wake = !(bl->head);
	bio_list_add(bl, bio);
	spin_unlock(&ms->lock);

	if (should_wake)
		wake();
}

/*
 * Mirror mapping function
 */
static int mirror_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	int r, rw = bio_rw(bio);
	struct mirror *m;
	struct mirror_set *ms = ti->private;

	map_context->ll = bio->bi_sector >> ms->rh.region_shift;

	if (rw == WRITE) {
		queue_bio(ms, bio, rw);
		return 0;
	}

	r = ms->rh.log->type->in_sync(ms->rh.log,
				      bio_to_region(&ms->rh, bio), 0);
	if (r < 0 && r != -EWOULDBLOCK)
		return r;

	if (r == -EWOULDBLOCK)	/* FIXME: ugly */
		r = 0;

	/*
	 * We don't want to fast track a recovery just for a read
	 * ahead.  So we just let it silently fail.
	 * FIXME: get rid of this.
	 */
	if (!r && rw == READA)
		return -EIO;

	if (!r) {
		/* Pass this io over to the daemon */
		queue_bio(ms, bio, rw);
		return 0;
	}

	m = choose_mirror(ms, bio->bi_sector);
	if (!m)
		return -EIO;

	map_bio(ms, m, bio);
	return 1;
}

static int mirror_end_io(struct dm_target *ti, struct bio *bio,
			 int error, union map_info *map_context)
{
	int rw = bio_rw(bio);
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	region_t region = map_context->ll;

	/*
	 * We need to dec pending if this was a write.
	 */
	if (rw == WRITE)
		rh_dec(&ms->rh, region);

	return 0;
}

static void mirror_postsuspend(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dirty_log *log = ms->rh.log;

	rh_stop_recovery(&ms->rh);
	if (log->type->suspend && log->type->suspend(log))
		/* FIXME: need better error handling */
		DMWARN("log suspend failed");
}

static void mirror_resume(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dirty_log *log = ms->rh.log;

	if (log->type->resume && log->type->resume(log))
		/* FIXME: need better error handling */
		DMWARN("log resume failed");
	rh_start_recovery(&ms->rh);
}

static int mirror_status(struct dm_target *ti, status_type_t type,
			 char *result, unsigned int maxlen)
{
	unsigned int m, sz;
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	sz = ms->rh.log->type->status(ms->rh.log, type, result, maxlen);

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%d ", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT("%s ", ms->mirror[m].dev->name);

		DMEMIT(SECTOR_FORMAT "/" SECTOR_FORMAT,
		       ms->rh.log->type->get_sync_count(ms->rh.log),
		       ms->nr_regions);
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%d ", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT("%s " SECTOR_FORMAT " ",
			       ms->mirror[m].dev->name, ms->mirror[m].offset);
	}

	return 0;
}

static struct target_type mirror_target = {
	.name	 = "mirror",
	.version = {1, 0, 1},
	.module	 = THIS_MODULE,
	.ctr	 = mirror_ctr,
	.dtr	 = mirror_dtr,
	.map	 = mirror_map,
	.end_io	 = mirror_end_io,
	.postsuspend = mirror_postsuspend,
	.resume	 = mirror_resume,
	.status	 = mirror_status,
};

static int __init dm_mirror_init(void)
{
	int r;

	r = dm_dirty_log_init();
	if (r)
		return r;

	_kmirrord_wq = create_singlethread_workqueue("kmirrord");
	if (!_kmirrord_wq) {
		DMERR("couldn't start kmirrord");
		dm_dirty_log_exit();
		return -ENOMEM;	/* 'r' is 0 here; report the real failure */
	}
	INIT_WORK(&_kmirrord_work, do_work, NULL);

	r = dm_register_target(&mirror_target);
	if (r < 0) {
		DMERR("%s: Failed to register mirror target",
		      mirror_target.name);
		dm_dirty_log_exit();
		destroy_workqueue(_kmirrord_wq);
	}

	return r;
}

static void __exit dm_mirror_exit(void)
{
	int r;

	r = dm_unregister_target(&mirror_target);
	if (r < 0)
		DMERR("%s: unregister failed %d", mirror_target.name, r);

	destroy_workqueue(_kmirrord_wq);
	dm_dirty_log_exit();
}

/* Module hooks */
module_init(dm_mirror_init);
module_exit(dm_mirror_exit);

MODULE_DESCRIPTION(DM_NAME " mirror target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");