/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <asm/atomic.h>

#define DM_MSG_PREFIX "table"

#define MAX_DEPTH 16
#define NODE_SIZE L1_CACHE_BYTES
#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)

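/*
 * Illustration (editorial note; the values are arch-dependent): with
 * L1_CACHE_BYTES == 64 and an 8-byte sector_t, KEYS_PER_NODE == 8 and
 * CHILDREN_PER_NODE == 9, so each btree node fills exactly one cache
 * line.
 */
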
struct dm_table {
	struct mapped_device *md;
	atomic_t holders;

	/* btree table */
	unsigned int depth;
	unsigned int counts[MAX_DEPTH];	/* in nodes */
	sector_t *index[MAX_DEPTH];

	unsigned int num_targets;
	unsigned int num_allocated;
	sector_t *highs;
	struct dm_target *targets;

	/*
	 * Indicates the rw permissions for the new logical
	 * device.  This should be a combination of FMODE_READ
	 * and FMODE_WRITE.
	 */
	int mode;

	/* a list of devices used by this table */
	struct list_head devices;

	/*
	 * These are optimistic limits taken from all the
	 * targets; some targets will need smaller limits.
	 */
	struct io_restrictions limits;

	/* events get handed up using this callback */
	void (*event_fn)(void *);
	void *event_context;
};

/*
 * Similar to ceiling(log_base(n))
 */
static unsigned int int_log(unsigned int n, unsigned int base)
{
	int result = 0;

	while (n > 1) {
		n = dm_div_up(n, base);
		result++;
	}

	return result;
}

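/*
 * For illustration: int_log(1000, 9) iterates
 * 1000 -> 112 -> 13 -> 2 -> 1 and returns 4, matching
 * ceil(log_9(1000)).
 */
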
/*
 * Returns the minimum that is _not_ zero, unless both are zero.
 */
#define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min(l, r)))
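/*
 * A zero limit means "no restriction", which is why a plain min()
 * would be wrong here: e.g. min_not_zero(0, 128) == 128.
 */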

/*
 * Combine two io_restrictions, taking the stricter value of each
 * field (note that hardsect_size takes the larger value and
 * no_cluster is sticky).
 */
static void combine_restrictions_low(struct io_restrictions *lhs,
				     struct io_restrictions *rhs)
{
	lhs->max_sectors =
		min_not_zero(lhs->max_sectors, rhs->max_sectors);

	lhs->max_phys_segments =
		min_not_zero(lhs->max_phys_segments, rhs->max_phys_segments);

	lhs->max_hw_segments =
		min_not_zero(lhs->max_hw_segments, rhs->max_hw_segments);

	lhs->hardsect_size = max(lhs->hardsect_size, rhs->hardsect_size);

	lhs->max_segment_size =
		min_not_zero(lhs->max_segment_size, rhs->max_segment_size);

	lhs->max_hw_sectors =
		min_not_zero(lhs->max_hw_sectors, rhs->max_hw_sectors);

	lhs->seg_boundary_mask =
		min_not_zero(lhs->seg_boundary_mask, rhs->seg_boundary_mask);

	lhs->bounce_pfn = min_not_zero(lhs->bounce_pfn, rhs->bounce_pfn);

	lhs->no_cluster |= rhs->no_cluster;
}

/*
 * Calculate the index of the child node for the n'th node's k'th key.
 */
static inline unsigned int get_child(unsigned int n, unsigned int k)
{
	return (n * CHILDREN_PER_NODE) + k;
}

/*
 * Return the n'th node of level l from table t.
 */
static inline sector_t *get_node(struct dm_table *t,
				 unsigned int l, unsigned int n)
{
	return t->index[l] + (n * KEYS_PER_NODE);
}

/*
 * Return the highest key that you could look up from the n'th
 * node on level l of the btree.
 */
static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
{
	for (; l < t->depth - 1; l++)
		n = get_child(n, CHILDREN_PER_NODE - 1);

	if (n >= t->counts[l])
		return (sector_t) -1;

	return get_node(t, l, n)[KEYS_PER_NODE - 1];
}
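
/*
 * In other words, high() descends the rightmost child at each level
 * down to the leaves; (sector_t) -1 acts as an "infinite" key for
 * nodes beyond the populated range.
 */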

/*
 * Fills in a level of the btree based on the highs of the level
 * below it.
 */
static int setup_btree_index(unsigned int l, struct dm_table *t)
{
	unsigned int n, k;
	sector_t *node;

	for (n = 0U; n < t->counts[l]; n++) {
		node = get_node(t, l, n);

		for (k = 0U; k < KEYS_PER_NODE; k++)
			node[k] = high(t, l + 1, get_child(n, k));
	}

	return 0;
}

void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size)
{
	unsigned long size;
	void *addr;

	/*
	 * Check that we're not going to overflow.
	 */
	if (nmemb > (ULONG_MAX / elem_size))
		return NULL;

	size = nmemb * elem_size;
	addr = vmalloc(size);
	if (addr)
		memset(addr, 0, size);

	return addr;
}
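
/*
 * The overflow check above rejects multiplications that would wrap:
 * e.g. on a 32-bit machine dm_vcalloc(0x20000000, 16) returns NULL
 * rather than allocating a too-small buffer.
 */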

/*
 * The highs and targets arrays are managed as dynamic arrays during
 * a table load.
 */
static int alloc_targets(struct dm_table *t, unsigned int num)
{
	sector_t *n_highs;
	struct dm_target *n_targets;
	int n = t->num_targets;

	/*
	 * Allocate both the target array and offset array at once.
	 * Append an empty entry to catch sectors beyond the end of
	 * the device.
	 */
	n_highs = (sector_t *) dm_vcalloc(num + 1, sizeof(struct dm_target) +
					  sizeof(sector_t));
	if (!n_highs)
		return -ENOMEM;

	n_targets = (struct dm_target *) (n_highs + num);

	if (n) {
		memcpy(n_highs, t->highs, sizeof(*n_highs) * n);
		memcpy(n_targets, t->targets, sizeof(*n_targets) * n);
	}

	memset(n_highs + n, -1, sizeof(*n_highs) * (num - n));
	vfree(t->highs);

	t->num_allocated = num;
	t->highs = n_highs;
	t->targets = n_targets;

	return 0;
}
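
/*
 * Layout of the single allocation above (illustrative):
 *
 *   [ highs[0] .. highs[num-1] | targets[0] .. targets[num] ]
 *
 * Unused highs[] slots are pre-filled with (sector_t) -1 so they
 * compare higher than any real sector.
 */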

int dm_table_create(struct dm_table **result, int mode,
		    unsigned num_targets, struct mapped_device *md)
{
	struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);

	if (!t)
		return -ENOMEM;

	INIT_LIST_HEAD(&t->devices);
	atomic_set(&t->holders, 1);

	if (!num_targets)
		num_targets = KEYS_PER_NODE;

	num_targets = dm_round_up(num_targets, KEYS_PER_NODE);

	if (alloc_targets(t, num_targets)) {
		kfree(t);
		t = NULL;
		return -ENOMEM;
	}

	t->mode = mode;
	t->md = md;
	*result = t;
	return 0;
}

static void free_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct dm_dev *dd = list_entry(tmp, struct dm_dev, list);
		kfree(dd);
	}
}

static void table_destroy(struct dm_table *t)
{
	unsigned int i;

	/* free the indexes (see dm_table_complete) */
	if (t->depth >= 2)
		vfree(t->index[t->depth - 2]);

	/* free the targets */
	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *tgt = t->targets + i;

		if (tgt->type->dtr)
			tgt->type->dtr(tgt);

		dm_put_target_type(tgt->type);
	}

	vfree(t->highs);

	/* free the device list */
	if (!list_empty(&t->devices)) {
		DMWARN("devices still present during destroy: "
		       "dm_table_remove_device calls missing");

		free_devices(&t->devices);
	}

	kfree(t);
}

void dm_table_get(struct dm_table *t)
{
	atomic_inc(&t->holders);
}

void dm_table_put(struct dm_table *t)
{
	if (!t)
		return;

	if (atomic_dec_and_test(&t->holders))
		table_destroy(t);
}

/*
 * Checks to see if we need to extend highs or targets.
 */
static inline int check_space(struct dm_table *t)
{
	if (t->num_targets >= t->num_allocated)
		return alloc_targets(t, t->num_allocated * 2);

	return 0;
}

/*
 * Convert a device path to a dev_t.
 */
static int lookup_device(const char *path, dev_t *dev)
{
	int r;
	struct nameidata nd;
	struct inode *inode;

	if ((r = path_lookup(path, LOOKUP_FOLLOW, &nd)))
		return r;

	inode = nd.path.dentry->d_inode;
	if (!inode) {
		r = -ENOENT;
		goto out;
	}

	if (!S_ISBLK(inode->i_mode)) {
		r = -ENOTBLK;
		goto out;
	}

	*dev = inode->i_rdev;

out:
	path_put(&nd.path);
	return r;
}

/*
 * See if we've already got a device in the list.
 */
static struct dm_dev *find_device(struct list_head *l, dev_t dev)
{
	struct dm_dev *dd;

	list_for_each_entry(dd, l, list)
		if (dd->bdev->bd_dev == dev)
			return dd;

	return NULL;
}

/*
 * Open a device so we can use it as a map destination.
 */
static int open_dev(struct dm_dev *d, dev_t dev, struct mapped_device *md)
{
	static char *_claim_ptr = "I belong to device-mapper";
	struct block_device *bdev;

	int r;

	BUG_ON(d->bdev);

	bdev = open_by_devnum(dev, d->mode);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);
	r = bd_claim_by_disk(bdev, _claim_ptr, dm_disk(md));
	if (r)
		blkdev_put(bdev);
	else
		d->bdev = bdev;
	return r;
}

/*
 * Close a device that we've been using.
 */
static void close_dev(struct dm_dev *d, struct mapped_device *md)
{
	if (!d->bdev)
		return;

	bd_release_from_disk(d->bdev, dm_disk(md));
	blkdev_put(d->bdev);
	d->bdev = NULL;
}

/*
 * If possible, this checks that an area of the destination device
 * is valid.
 */
static int check_device_area(struct dm_dev *dd, sector_t start, sector_t len)
{
	sector_t dev_size = dd->bdev->bd_inode->i_size >> SECTOR_SHIFT;

	if (!dev_size)
		return 1;

	return ((start < dev_size) && (len <= (dev_size - start)));
}
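
/*
 * Note that when the device size cannot be determined
 * (dev_size == 0) the area is optimistically assumed to be valid.
 */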

/*
 * This upgrades the mode on an already open dm_dev, being careful
 * to leave things as they were if we fail to reopen the device.
 */
static int upgrade_mode(struct dm_dev *dd, int new_mode, struct mapped_device *md)
{
	int r;
	struct dm_dev dd_copy;
	dev_t dev = dd->bdev->bd_dev;

	dd_copy = *dd;

	dd->mode |= new_mode;
	dd->bdev = NULL;
	r = open_dev(dd, dev, md);
	if (!r)
		close_dev(&dd_copy, md);
	else
		*dd = dd_copy;

	return r;
}

/*
 * Add a device to the list, or just increment the usage count if
 * it's already present.
 */
static int __table_get_device(struct dm_table *t, struct dm_target *ti,
			      const char *path, sector_t start, sector_t len,
			      int mode, struct dm_dev **result)
{
	int r;
	dev_t uninitialized_var(dev);
	struct dm_dev *dd;
	unsigned int major, minor;

	BUG_ON(!t);

	if (sscanf(path, "%u:%u", &major, &minor) == 2) {
		/* Extract the major/minor numbers */
		dev = MKDEV(major, minor);
		if (MAJOR(dev) != major || MINOR(dev) != minor)
			return -EOVERFLOW;
	} else {
		/* convert the path to a device */
		if ((r = lookup_device(path, &dev)))
			return r;
	}

	dd = find_device(&t->devices, dev);
	if (!dd) {
		dd = kmalloc(sizeof(*dd), GFP_KERNEL);
		if (!dd)
			return -ENOMEM;

		dd->mode = mode;
		dd->bdev = NULL;

		if ((r = open_dev(dd, dev, t->md))) {
			kfree(dd);
			return r;
		}

		format_dev_t(dd->name, dev);

		atomic_set(&dd->count, 0);
		list_add(&dd->list, &t->devices);

	} else if (dd->mode != (mode | dd->mode)) {
		r = upgrade_mode(dd, mode, t->md);
		if (r)
			return r;
	}
	atomic_inc(&dd->count);

	if (!check_device_area(dd, start, len)) {
		DMWARN("device %s too small for target", path);
		dm_put_device(ti, dd);
		return -EINVAL;
	}

	*result = dd;

	return 0;
}
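
/*
 * Callers may name a device either as "major:minor" (e.g. "8:16")
 * or as a path (e.g. "/dev/sdb"); both forms resolve to the same
 * dev_t above.
 */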

void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct io_restrictions *rs = &ti->limits;

	/*
	 * Combine the device limits low.
	 *
	 * FIXME: if we move an io_restriction struct
	 *        into q this would just be a call to
	 *        combine_restrictions_low()
	 */
	rs->max_sectors =
		min_not_zero(rs->max_sectors, q->max_sectors);

	/* FIXME: Device-Mapper on top of RAID-0 breaks because DM
	 *        currently doesn't honor MD's merge_bvec_fn routine.
	 *        In this case, we'll force DM to use PAGE_SIZE or
	 *        smaller I/O, just to be safe. A better fix is in the
	 *        works, but add this for the time being so it will at
	 *        least operate correctly.
	 */
	if (q->merge_bvec_fn)
		rs->max_sectors =
			min_not_zero(rs->max_sectors,
				     (unsigned int) (PAGE_SIZE >> 9));

	rs->max_phys_segments =
		min_not_zero(rs->max_phys_segments,
			     q->max_phys_segments);

	rs->max_hw_segments =
		min_not_zero(rs->max_hw_segments, q->max_hw_segments);

	rs->hardsect_size = max(rs->hardsect_size, q->hardsect_size);

	rs->max_segment_size =
		min_not_zero(rs->max_segment_size, q->max_segment_size);

	rs->max_hw_sectors =
		min_not_zero(rs->max_hw_sectors, q->max_hw_sectors);

	rs->seg_boundary_mask =
		min_not_zero(rs->seg_boundary_mask,
			     q->seg_boundary_mask);

	rs->bounce_pfn = min_not_zero(rs->bounce_pfn, q->bounce_pfn);

	rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
}
EXPORT_SYMBOL_GPL(dm_set_device_limits);

int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
		  sector_t len, int mode, struct dm_dev **result)
{
	int r = __table_get_device(ti->table, ti, path,
				   start, len, mode, result);

	if (!r)
		dm_set_device_limits(ti, (*result)->bdev);

	return r;
}

/*
 * Decrement a device's use count and remove it if necessary.
 */
void dm_put_device(struct dm_target *ti, struct dm_dev *dd)
{
	if (atomic_dec_and_test(&dd->count)) {
		close_dev(dd, ti->table->md);
		list_del(&dd->list);
		kfree(dd);
	}
}

/*
 * Checks to see if the target joins onto the end of the table.
 */
static int adjoin(struct dm_table *table, struct dm_target *ti)
{
	struct dm_target *prev;

	if (!table->num_targets)
		return !ti->begin;

	prev = &table->targets[table->num_targets - 1];
	return (ti->begin == (prev->begin + prev->len));
}
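
/*
 * For example, a target covering sectors 0-99 may only be followed
 * by one beginning at sector 100, and the first target in a table
 * must begin at sector 0.
 */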

/*
 * Used to dynamically allocate the arg array.
 */
static char **realloc_argv(unsigned *array_size, char **old_argv)
{
	char **argv;
	unsigned new_size;

	new_size = *array_size ? *array_size * 2 : 64;
	argv = kmalloc(new_size * sizeof(*argv), GFP_KERNEL);
	if (argv) {
		memcpy(argv, old_argv, *array_size * sizeof(*argv));
		*array_size = new_size;
	}

	kfree(old_argv);
	return argv;
}

/*
 * Destructively splits up the argument list to pass to ctr.
 */
int dm_split_args(int *argc, char ***argvp, char *input)
{
	char *start, *end = input, *out, **argv = NULL;
	unsigned array_size = 0;

	*argc = 0;

	if (!input) {
		*argvp = NULL;
		return 0;
	}

	argv = realloc_argv(&array_size, argv);
	if (!argv)
		return -ENOMEM;

	while (1) {
		start = end;

		/* Skip whitespace */
		while (*start && isspace(*start))
			start++;

		if (!*start)
			break;	/* success, we hit the end */

		/* 'out' is used to strip the backslash quoting */
		end = out = start;
		while (*end) {
			/* Everything apart from '\0' can be quoted */
			if (*end == '\\' && *(end + 1)) {
				*out++ = *(end + 1);
				end += 2;
				continue;
			}

			if (isspace(*end))
				break;	/* end of token */

			*out++ = *end++;
		}

		/* have we already filled the array? */
		if ((*argc + 1) > array_size) {
			argv = realloc_argv(&array_size, argv);
			if (!argv)
				return -ENOMEM;
		}

		/* we know this is whitespace */
		if (*end)
			end++;

		/* terminate the string and put it in the array */
		*out = '\0';
		argv[*argc] = start;
		(*argc)++;
	}

	*argvp = argv;
	return 0;
}
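
/*
 * For illustration: dm_split_args() turns the writable string
 * "0 1024 linear /dev/sda 0" into argc == 5 tokens, and the
 * escaped input "a\ b" becomes the single argument "a b".
 */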

static void check_for_valid_limits(struct io_restrictions *rs)
{
	if (!rs->max_sectors)
		rs->max_sectors = SAFE_MAX_SECTORS;
	if (!rs->max_hw_sectors)
		rs->max_hw_sectors = SAFE_MAX_SECTORS;
	if (!rs->max_phys_segments)
		rs->max_phys_segments = MAX_PHYS_SEGMENTS;
	if (!rs->max_hw_segments)
		rs->max_hw_segments = MAX_HW_SEGMENTS;
	if (!rs->hardsect_size)
		rs->hardsect_size = 1 << SECTOR_SHIFT;
	if (!rs->max_segment_size)
		rs->max_segment_size = MAX_SEGMENT_SIZE;
	if (!rs->seg_boundary_mask)
		rs->seg_boundary_mask = -1;
	if (!rs->bounce_pfn)
		rs->bounce_pfn = -1;
}

int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params)
{
	int r = -EINVAL, argc;
	char **argv;
	struct dm_target *tgt;

	if ((r = check_space(t)))
		return r;

	tgt = t->targets + t->num_targets;
	memset(tgt, 0, sizeof(*tgt));

	if (!len) {
		DMERR("%s: zero-length target", dm_device_name(t->md));
		return -EINVAL;
	}

	tgt->type = dm_get_target_type(type);
	if (!tgt->type) {
		DMERR("%s: %s: unknown target type", dm_device_name(t->md),
		      type);
		return -EINVAL;
	}

	tgt->table = t;
	tgt->begin = start;
	tgt->len = len;
	tgt->error = "Unknown error";

	/*
	 * Does this target adjoin the previous one?
	 */
	if (!adjoin(t, tgt)) {
		tgt->error = "Gap in table";
		r = -EINVAL;
		goto bad;
	}

	r = dm_split_args(&argc, &argv, params);
	if (r) {
		tgt->error = "couldn't split parameters (insufficient memory)";
		goto bad;
	}

	r = tgt->type->ctr(tgt, argc, argv);
	kfree(argv);
	if (r)
		goto bad;

	t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;

	/* FIXME: the plan is to combine high here and then have
	 * the merge fn apply the target level restrictions. */
	combine_restrictions_low(&t->limits, &tgt->limits);
	return 0;

bad:
	DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
	dm_put_target_type(tgt->type);
	return r;
}

static int setup_indexes(struct dm_table *t)
{
	int i;
	unsigned int total = 0;
	sector_t *indexes;

	/* allocate the space for *all* the indexes */
	for (i = t->depth - 2; i >= 0; i--) {
		t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
		total += t->counts[i];
	}

	indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE);
	if (!indexes)
		return -ENOMEM;

	/* set up internal nodes, bottom-up */
	for (i = t->depth - 2; i >= 0; i--) {
		t->index[i] = indexes;
		indexes += (KEYS_PER_NODE * t->counts[i]);
		setup_btree_index(i, t);
	}

	return 0;
}

/*
 * Builds the btree to index the map.
 */
int dm_table_complete(struct dm_table *t)
{
	int r = 0;
	unsigned int leaf_nodes;

	check_for_valid_limits(&t->limits);

	/* how many indexes will the btree have? */
	leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
	t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);

	/* leaf layer has already been set up */
	t->counts[t->depth - 1] = leaf_nodes;
	t->index[t->depth - 1] = t->highs;

	if (t->depth >= 2)
		r = setup_indexes(t);

	return r;
}
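
/*
 * Sizing illustration: with KEYS_PER_NODE == 8 and 150 targets,
 * leaf_nodes == 19 and depth == 1 + int_log(19, 9) == 3, so a
 * lookup touches at most three nodes.
 */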

static DEFINE_MUTEX(_event_lock);

void dm_table_event_callback(struct dm_table *t,
			     void (*fn)(void *), void *context)
{
	mutex_lock(&_event_lock);
	t->event_fn = fn;
	t->event_context = context;
	mutex_unlock(&_event_lock);
}

void dm_table_event(struct dm_table *t)
{
	/*
	 * You can no longer call dm_table_event() from interrupt
	 * context; use a bottom half instead.
	 */
	BUG_ON(in_interrupt());

	mutex_lock(&_event_lock);
	if (t->event_fn)
		t->event_fn(t->event_context);
	mutex_unlock(&_event_lock);
}

sector_t dm_table_get_size(struct dm_table *t)
{
	return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
}

struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
{
	if (index >= t->num_targets)
		return NULL;

	return t->targets + index;
}

/*
 * Search the btree for the correct target.
 *
 * Caller should check returned pointer with dm_target_is_valid()
 * to trap I/O beyond end of device.
 */
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
{
	unsigned int l, n = 0, k = 0;
	sector_t *node;

	for (l = 0; l < t->depth; l++) {
		n = get_child(n, k);
		node = get_node(t, l, n);

		for (k = 0; k < KEYS_PER_NODE; k++)
			if (node[k] >= sector)
				break;
	}

	return &t->targets[(KEYS_PER_NODE * n) + k];
}
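
/*
 * The search descends one node per level; within each node the
 * first key >= sector selects the child to follow and, at the
 * leaves, the target covering that sector.
 */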

void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
{
	/*
	 * Make sure we obey the optimistic sub-devices'
	 * restrictions.
	 */
	blk_queue_max_sectors(q, t->limits.max_sectors);
	q->max_phys_segments = t->limits.max_phys_segments;
	q->max_hw_segments = t->limits.max_hw_segments;
	q->hardsect_size = t->limits.hardsect_size;
	q->max_segment_size = t->limits.max_segment_size;
	q->max_hw_sectors = t->limits.max_hw_sectors;
	q->seg_boundary_mask = t->limits.seg_boundary_mask;
	q->bounce_pfn = t->limits.bounce_pfn;

	if (t->limits.no_cluster)
		queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
	else
		queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q);
}

unsigned int dm_table_get_num_targets(struct dm_table *t)
{
	return t->num_targets;
}

struct list_head *dm_table_get_devices(struct dm_table *t)
{
	return &t->devices;
}

int dm_table_get_mode(struct dm_table *t)
{
	return t->mode;
}

static void suspend_targets(struct dm_table *t, unsigned postsuspend)
{
	int i = t->num_targets;
	struct dm_target *ti = t->targets;

	while (i--) {
		if (postsuspend) {
			if (ti->type->postsuspend)
				ti->type->postsuspend(ti);
		} else if (ti->type->presuspend)
			ti->type->presuspend(ti);

		ti++;
	}
}

void dm_table_presuspend_targets(struct dm_table *t)
{
	if (!t)
		return;

	suspend_targets(t, 0);
}

void dm_table_postsuspend_targets(struct dm_table *t)
{
	if (!t)
		return;

	suspend_targets(t, 1);
}

int dm_table_resume_targets(struct dm_table *t)
{
	int i, r = 0;

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = t->targets + i;

		if (!ti->type->preresume)
			continue;

		r = ti->type->preresume(ti);
		if (r)
			return r;
	}

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = t->targets + i;

		if (ti->type->resume)
			ti->type->resume(ti);
	}

	return 0;
}
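
/*
 * Note the two passes above: every target's ->preresume must
 * succeed before any target's ->resume is called, so a preresume
 * failure leaves no target resumed.
 */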

int dm_table_any_congested(struct dm_table *t, int bdi_bits)
{
	struct dm_dev *dd;
	struct list_head *devices = dm_table_get_devices(t);
	int r = 0;

	list_for_each_entry(dd, devices, list) {
		struct request_queue *q = bdev_get_queue(dd->bdev);
		r |= bdi_congested(&q->backing_dev_info, bdi_bits);
	}

	return r;
}

void dm_table_unplug_all(struct dm_table *t)
{
	struct dm_dev *dd;
	struct list_head *devices = dm_table_get_devices(t);

	list_for_each_entry(dd, devices, list) {
		struct request_queue *q = bdev_get_queue(dd->bdev);

		blk_unplug(q);
	}
}

struct mapped_device *dm_table_get_md(struct dm_table *t)
{
	dm_get(t->md);

	return t->md;
}

EXPORT_SYMBOL(dm_vcalloc);
EXPORT_SYMBOL(dm_get_device);
EXPORT_SYMBOL(dm_put_device);
EXPORT_SYMBOL(dm_table_event);
EXPORT_SYMBOL(dm_table_get_size);
EXPORT_SYMBOL(dm_table_get_mode);
EXPORT_SYMBOL(dm_table_get_md);
EXPORT_SYMBOL(dm_table_put);
EXPORT_SYMBOL(dm_table_get);
EXPORT_SYMBOL(dm_table_unplug_all);