/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <asm/div64.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"

struct map_lookup {
	u64 type;
	int io_align;
	int io_width;
	int stripe_len;
	int sector_size;
	int num_stripes;
	int sub_stripes;
	struct btrfs_bio_stripe stripes[];
};

#define map_lookup_size(n) (sizeof(struct map_lookup) + \
			    (sizeof(struct btrfs_bio_stripe) * (n)))
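/*
 * map_lookup ends in a flexible stripes[] array, so the allocation size
 * depends on the stripe count.  For example, a mapping striped across
 * four devices is allocated as map_lookup_size(4), i.e.
 * sizeof(struct map_lookup) + 4 * sizeof(struct btrfs_bio_stripe);
 * btrfs_alloc_chunk() below passes exactly this to kmalloc().
 */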

static DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);

void btrfs_lock_volumes(void)
{
	mutex_lock(&uuid_mutex);
}

void btrfs_unlock_volumes(void)
{
	mutex_unlock(&uuid_mutex);
}

int btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;
	struct list_head *uuid_cur;
	struct list_head *devices_cur;
	struct btrfs_device *dev;

	list_for_each(uuid_cur, &fs_uuids) {
		fs_devices = list_entry(uuid_cur, struct btrfs_fs_devices,
					list);
		while(!list_empty(&fs_devices->devices)) {
			devices_cur = fs_devices->devices.next;
			dev = list_entry(devices_cur, struct btrfs_device,
					 dev_list);
			if (dev->bdev) {
				close_bdev_excl(dev->bdev);
			}
			list_del(&dev->dev_list);
			kfree(dev);
		}
	}
	return 0;
}

static struct btrfs_device *__find_device(struct list_head *head, u64 devid,
					  u8 *uuid)
{
	struct btrfs_device *dev;
	struct list_head *cur;

	list_for_each(cur, head) {
		dev = list_entry(cur, struct btrfs_device, dev_list);
		if (dev->devid == devid &&
		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
			return dev;
		}
	}
	return NULL;
}

static struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct list_head *cur;
	struct btrfs_fs_devices *fs_devices;

	list_for_each(cur, &fs_uuids) {
		fs_devices = list_entry(cur, struct btrfs_fs_devices, list);
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}

static int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	u64 found_transid = btrfs_super_generation(disk_super);

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = kmalloc(sizeof(*fs_devices), GFP_NOFS);
		if (!fs_devices)
			return -ENOMEM;
		INIT_LIST_HEAD(&fs_devices->devices);
		INIT_LIST_HEAD(&fs_devices->alloc_list);
		list_add(&fs_devices->list, &fs_uuids);
		memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
		fs_devices->lowest_devid = (u64)-1;
		fs_devices->num_devices = 0;
		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}
	if (!device) {
		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device) {
			/* we can safely leave the fs_devices entry around */
			return -ENOMEM;
		}
		device->devid = devid;
		memcpy(device->uuid, disk_super->dev_item.uuid,
		       BTRFS_UUID_SIZE);
		device->barriers = 1;
		spin_lock_init(&device->io_lock);
		device->name = kstrdup(path, GFP_NOFS);
		if (!device->name) {
			kfree(device);
			return -ENOMEM;
		}
		list_add(&device->dev_list, &fs_devices->devices);
		list_add(&device->dev_alloc_list, &fs_devices->alloc_list);
		fs_devices->num_devices++;
	}

	if (found_transid > fs_devices->latest_trans) {
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
	}
	if (fs_devices->lowest_devid > devid) {
		fs_devices->lowest_devid = devid;
	}
	*fs_devices_ret = fs_devices;
	return 0;
}

int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct list_head *head = &fs_devices->devices;
	struct list_head *cur;
	struct btrfs_device *device;

	mutex_lock(&uuid_mutex);
	list_for_each(cur, head) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		if (device->bdev) {
			close_bdev_excl(device->bdev);
		}
		device->bdev = NULL;
	}
	mutex_unlock(&uuid_mutex);
	return 0;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       int flags, void *holder)
{
	struct block_device *bdev;
	struct list_head *head = &fs_devices->devices;
	struct list_head *cur;
	struct btrfs_device *device;
	int ret;

	mutex_lock(&uuid_mutex);
	list_for_each(cur, head) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		bdev = open_bdev_excl(device->name, flags, holder);

		if (IS_ERR(bdev)) {
			printk("open %s failed\n", device->name);
			ret = PTR_ERR(bdev);
			goto fail;
		}
		set_blocksize(bdev, 4096);
		if (device->devid == fs_devices->latest_devid)
			fs_devices->latest_bdev = bdev;
		if (device->devid == fs_devices->lowest_devid) {
			fs_devices->lowest_bdev = bdev;
		}
		device->bdev = bdev;
	}
	mutex_unlock(&uuid_mutex);
	return 0;
fail:
	mutex_unlock(&uuid_mutex);
	btrfs_close_devices(fs_devices);
	return ret;
}

int btrfs_scan_one_device(const char *path, int flags, void *holder,
			  struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_super_block *disk_super;
	struct block_device *bdev;
	struct buffer_head *bh;
	int ret;
	u64 devid;
	u64 transid;

	mutex_lock(&uuid_mutex);

	bdev = open_bdev_excl(path, flags, holder);

	if (IS_ERR(bdev)) {
		ret = PTR_ERR(bdev);
		goto error;
	}

	ret = set_blocksize(bdev, 4096);
	if (ret)
		goto error_close;
	bh = __bread(bdev, BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
	if (!bh) {
		ret = -EIO;
		goto error_close;
	}
	disk_super = (struct btrfs_super_block *)bh->b_data;
	if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
	    sizeof(disk_super->magic))) {
		ret = -EINVAL;
		goto error_brelse;
	}
	devid = le64_to_cpu(disk_super->dev_item.devid);
	transid = btrfs_super_generation(disk_super);
	if (disk_super->label[0])
		printk("device label %s ", disk_super->label);
	else {
		/* FIXME, make a real uuid parser */
		printk("device fsid %llx-%llx ",
		       *(unsigned long long *)disk_super->fsid,
		       *(unsigned long long *)(disk_super->fsid + 8));
	}
	printk("devid %Lu transid %Lu %s\n", devid, transid, path);
	ret = device_list_add(path, disk_super, devid, fs_devices_ret);

error_brelse:
	brelse(bh);
error_close:
	close_bdev_excl(bdev);
error:
	mutex_unlock(&uuid_mutex);
	return ret;
}

/*
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 */
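/*
 * For example (illustrative numbers only): with existing dev extents at
 * [1MB, 5MB) and [9MB, 20MB) and num_bytes == 3MB, the loop below walks
 * the DEV_EXTENT items in key order, sees the 4MB hole that starts at
 * 5MB and returns *start == 5MB.  If no hole is big enough, the space
 * just past the last extent is returned, or -ENOSPC once that would run
 * past device->total_bytes.
 */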
static int find_free_dev_extent(struct btrfs_trans_handle *trans,
				struct btrfs_device *device,
				struct btrfs_path *path,
				u64 num_bytes, u64 *start)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	u64 hole_size = 0;
	u64 last_byte = 0;
	u64 search_start = 0;
	u64 search_end = device->total_bytes;
	int ret;
	int slot = 0;
	int start_found;
	struct extent_buffer *l;

	start_found = 0;
	path->reada = 2;

	/* FIXME use last free of some kind */

	/* we don't want to overwrite the superblock on the drive,
	 * so we make sure to start at an offset of at least 1MB
	 */
	search_start = max((u64)1024 * 1024, search_start);

	if (root->fs_info->alloc_start + num_bytes <= device->total_bytes)
		search_start = max(root->fs_info->alloc_start, search_start);

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	ret = btrfs_previous_item(root, path, 0, key.type);
	if (ret < 0)
		goto error;
	l = path->nodes[0];
	btrfs_item_key_to_cpu(l, &key, path->slots[0]);
	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
no_more_items:
			if (!start_found) {
				if (search_start >= search_end) {
					ret = -ENOSPC;
					goto error;
				}
				*start = search_start;
				start_found = 1;
				goto check_pending;
			}
			*start = last_byte > search_start ?
				last_byte : search_start;
			if (search_end <= *start) {
				ret = -ENOSPC;
				goto error;
			}
			goto check_pending;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			goto no_more_items;

		if (key.offset >= search_start && key.offset > last_byte &&
		    start_found) {
			if (last_byte < search_start)
				last_byte = search_start;
			hole_size = key.offset - last_byte;
			if (key.offset > last_byte &&
			    hole_size >= num_bytes) {
				*start = last_byte;
				goto check_pending;
			}
		}
		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY) {
			goto next;
		}

		start_found = 1;
		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		last_byte = key.offset + btrfs_dev_extent_length(l, dev_extent);
next:
		path->slots[0]++;
		cond_resched();
	}
check_pending:
	/* we have to make sure we didn't find an extent that has already
	 * been allocated by the map tree or the original allocation
	 */
	btrfs_release_path(root, path);
	BUG_ON(*start < search_start);

	if (*start + num_bytes > search_end) {
		ret = -ENOSPC;
		goto error;
	}
	/* check for pending inserts here */
	return 0;

error:
	btrfs_release_path(root, path);
	return ret;
}

int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
			  struct btrfs_device *device,
			  u64 start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		BUG_ON(ret);
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		ret = 0;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	}
	BUG_ON(ret);

	device->bytes_used -= btrfs_dev_extent_length(leaf, extent);
	ret = btrfs_del_item(trans, root, path);
	BUG_ON(ret);

	btrfs_free_path(path);
	return ret;
}

int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
			   struct btrfs_device *device,
			   u64 chunk_tree, u64 chunk_objectid,
			   u64 chunk_offset,
			   u64 num_bytes, u64 *start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = find_free_dev_extent(trans, device, path, num_bytes, start);
	if (ret) {
		goto err;
	}

	key.objectid = device->devid;
	key.offset = *start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	BUG_ON(ret);

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
		    BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
err:
	btrfs_free_path(path);
	return ret;
}

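/*
 * find_next_chunk() below returns the first unused logical address for a
 * new chunk: if the highest existing chunk item for this objectid starts
 * at offset 20GB and is 1GB long (illustrative numbers), *offset comes
 * back as 21GB; an empty chunk tree yields *offset == 0.
 */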
static int find_next_chunk(struct btrfs_root *root, u64 objectid, u64 *offset)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_key found_key;

	path = btrfs_alloc_path();
	BUG_ON(!path);

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
	if (ret) {
		*offset = 0;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != objectid)
			*offset = 0;
		else {
			chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
					       struct btrfs_chunk);
			*offset = found_key.offset +
				btrfs_chunk_length(path->nodes[0], chunk);
		}
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

static int find_next_devid(struct btrfs_root *root, struct btrfs_path *path,
			   u64 *objectid)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*objectid = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*objectid = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_release_path(root, path);
	return ret;
}

/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;
	u64 free_devid = 0;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = find_next_devid(root, path, &free_devid);
	if (ret)
		goto out;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = free_devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	device->devid = free_devid;
	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;

out:
	btrfs_free_path(path);
	return ret;
}

static int btrfs_rm_dev_item(struct btrfs_root *root,
			     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct block_device *bdev = device->bdev;
	struct btrfs_device *next_dev;
	struct btrfs_key key;
	u64 total_bytes;
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_trans_handle *trans;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 1);
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret)
		goto out;

	/*
	 * at this point, the device is zero sized.  We want to
	 * remove it from the devices list and zero out the old super
	 */
	list_del_init(&device->dev_list);
	list_del_init(&device->dev_alloc_list);
	fs_devices = root->fs_info->fs_devices;

	next_dev = list_entry(fs_devices->devices.next, struct btrfs_device,
			      dev_list);
	if (bdev == fs_devices->lowest_bdev)
		fs_devices->lowest_bdev = next_dev->bdev;
	if (bdev == root->fs_info->sb->s_bdev)
		root->fs_info->sb->s_bdev = next_dev->bdev;
	if (bdev == fs_devices->latest_bdev)
		fs_devices->latest_bdev = next_dev->bdev;

	total_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
	btrfs_set_super_total_bytes(&root->fs_info->super_copy,
				    total_bytes - device->total_bytes);

	total_bytes = btrfs_super_num_devices(&root->fs_info->super_copy);
	btrfs_set_super_num_devices(&root->fs_info->super_copy,
				    total_bytes - 1);
out:
	btrfs_free_path(path);
	btrfs_commit_transaction(trans, root);
	return ret;
}

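/*
 * Overview of btrfs_rm_device() below: refuse if the remaining device
 * count would violate the RAID1/RAID10 minimums, read the superblock of
 * the named device to resolve its devid, shrink it to zero bytes (which
 * relocates every chunk off of it), delete its dev item from the chunk
 * root, and finally wipe the on-disk magic so later scans no longer
 * treat the device as part of this filesystem.
 */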
int btrfs_rm_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_device *device;
	struct block_device *bdev;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 all_avail;
	u64 devid;
	int ret = 0;

	mutex_lock(&root->fs_info->fs_mutex);
	mutex_lock(&uuid_mutex);

	all_avail = root->fs_info->avail_data_alloc_bits |
		root->fs_info->avail_system_alloc_bits |
		root->fs_info->avail_metadata_alloc_bits;

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
	    root->fs_info->fs_devices->num_devices <= 4) {
		printk("btrfs: unable to go below four devices on raid10\n");
		ret = -EINVAL;
		goto out;
	}

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
	    root->fs_info->fs_devices->num_devices <= 2) {
		printk("btrfs: unable to go below two devices on raid1\n");
		ret = -EINVAL;
		goto out;
	}

	bdev = open_bdev_excl(device_path, 0, root->fs_info->bdev_holder);
	if (IS_ERR(bdev)) {
		ret = PTR_ERR(bdev);
		goto out;
	}

	bh = __bread(bdev, BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
	if (!bh) {
		ret = -EIO;
		goto error_close;
	}
	disk_super = (struct btrfs_super_block *)bh->b_data;
	if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
	    sizeof(disk_super->magic))) {
		ret = -ENOENT;
		goto error_brelse;
	}
	if (memcmp(disk_super->fsid, root->fs_info->fsid, BTRFS_FSID_SIZE)) {
		ret = -ENOENT;
		goto error_brelse;
	}
	devid = le64_to_cpu(disk_super->dev_item.devid);
	device = btrfs_find_device(root, devid, NULL);
	if (!device) {
		ret = -ENOENT;
		goto error_brelse;
	}

	root->fs_info->fs_devices->num_devices--;

	ret = btrfs_shrink_device(device, 0);
	if (ret)
		goto error_brelse;

	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
	if (ret)
		goto error_brelse;

	/* make sure this device isn't detected as part of the FS anymore */
	memset(&disk_super->magic, 0, sizeof(disk_super->magic));
	set_buffer_dirty(bh);
	sync_dirty_buffer(bh);

	brelse(bh);

	/* one close for the device struct or super_block */
	close_bdev_excl(device->bdev);

	/* one close for us */
	close_bdev_excl(device->bdev);

	kfree(device->name);
	kfree(device);
	ret = 0;
	goto out;

error_brelse:
	brelse(bh);
error_close:
	close_bdev_excl(bdev);
out:
	mutex_unlock(&uuid_mutex);
	mutex_unlock(&root->fs_info->fs_mutex);
	return ret;
}

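/*
 * btrfs_init_new_device() below is the add-device path: it opens the
 * block device, builds a new btrfs_device with a freshly generated uuid
 * and a size taken from i_size of the block device, inserts a dev item
 * via btrfs_add_device(), bumps total_bytes and num_devices in the
 * super copy, and links the device into the per-fs device and
 * allocation lists.
 */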
int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device;
	struct block_device *bdev;
	struct list_head *cur;
	struct list_head *devices;
	u64 total_bytes;
	int ret = 0;

	bdev = open_bdev_excl(device_path, 0, root->fs_info->bdev_holder);
	if (!bdev) {
		return -EIO;
	}
	mutex_lock(&root->fs_info->fs_mutex);
	trans = btrfs_start_transaction(root, 1);
	devices = &root->fs_info->fs_devices->devices;
	list_for_each(cur, devices) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		if (device->bdev == bdev) {
			ret = -EEXIST;
			goto out;
		}
	}

	device = kzalloc(sizeof(*device), GFP_NOFS);
	if (!device) {
		/* we can safely leave the fs_devices entry around */
		ret = -ENOMEM;
		goto out_close_bdev;
	}

	device->barriers = 1;
	generate_random_uuid(device->uuid);
	spin_lock_init(&device->io_lock);
	device->name = kstrdup(device_path, GFP_NOFS);
	if (!device->name) {
		kfree(device);
		goto out_close_bdev;
	}
	device->io_width = root->sectorsize;
	device->io_align = root->sectorsize;
	device->sector_size = root->sectorsize;
	device->total_bytes = i_size_read(bdev->bd_inode);
	device->dev_root = root->fs_info->dev_root;
	device->bdev = bdev;

	ret = btrfs_add_device(trans, root, device);
	if (ret)
		goto out_close_bdev;

	total_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
	btrfs_set_super_total_bytes(&root->fs_info->super_copy,
				    total_bytes + device->total_bytes);

	total_bytes = btrfs_super_num_devices(&root->fs_info->super_copy);
	btrfs_set_super_num_devices(&root->fs_info->super_copy,
				    total_bytes + 1);

	list_add(&device->dev_list, &root->fs_info->fs_devices->devices);
	list_add(&device->dev_alloc_list,
		 &root->fs_info->fs_devices->alloc_list);
	root->fs_info->fs_devices->num_devices++;
out:
	btrfs_end_transaction(trans, root);
	mutex_unlock(&root->fs_info->fs_mutex);
	return ret;

out_close_bdev:
	close_bdev_excl(bdev);
	goto out;
}

int btrfs_update_device(struct btrfs_trans_handle *trans,
			struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_mark_buffer_dirty(leaf);

out:
	btrfs_free_path(path);
	return ret;
}

int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size)
{
	struct btrfs_super_block *super_copy =
		&device->dev_root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 diff = new_size - device->total_bytes;

	btrfs_set_super_total_bytes(super_copy, old_total + diff);
	return btrfs_update_device(trans, device);
}

static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    u64 chunk_tree, u64 chunk_objectid,
			    u64 chunk_offset)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	root = root->fs_info->chunk_root;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = chunk_objectid;
	key.offset = chunk_offset;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	BUG_ON(ret);

	ret = btrfs_del_item(trans, root, path);
	BUG_ON(ret);

	btrfs_free_path(path);
	return 0;
}

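/*
 * The sys_chunk_array in the super block is a packed sequence of
 * (btrfs_disk_key, btrfs_chunk + stripes) pairs.  btrfs_del_sys_chunk()
 * below walks that sequence and, when it finds the matching objectid and
 * offset, memmoves the tail of the array down over the entry and shrinks
 * sys_array_size accordingly.
 */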
int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
			chunk_offset)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)(ptr + len);
			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
			len += btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		if (key.objectid == chunk_objectid &&
		    key.offset == chunk_offset) {
			memmove(ptr, ptr + len, array_size - (cur + len));
			array_size -= len;
			btrfs_set_super_sys_array_size(super_copy, array_size);
		} else {
			ptr += len;
			cur += len;
		}
	}
	return ret;
}

int btrfs_relocate_chunk(struct btrfs_root *root,
			 u64 chunk_tree, u64 chunk_objectid,
			 u64 chunk_offset)
{
	struct extent_map_tree *em_tree;
	struct btrfs_root *extent_root;
	struct btrfs_trans_handle *trans;
	struct extent_map *em;
	struct map_lookup *map;
	int ret;
	int i;

	root = root->fs_info->chunk_root;
	extent_root = root->fs_info->extent_root;
	em_tree = &root->fs_info->mapping_tree.map_tree;

	/* step one, relocate all the extents inside this chunk */
	ret = btrfs_shrink_extent_tree(extent_root, chunk_offset);
	BUG_ON(ret);

	trans = btrfs_start_transaction(root, 1);
	BUG_ON(!trans);

	/*
	 * step two, delete the device extents and the
	 * chunk tree entries
	 */
	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	spin_unlock(&em_tree->lock);

	BUG_ON(em->start > chunk_offset ||
	       em->start + em->len < chunk_offset);
	map = (struct map_lookup *)em->bdev;

	for (i = 0; i < map->num_stripes; i++) {
		ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
					    map->stripes[i].physical);
		BUG_ON(ret);

		ret = btrfs_update_device(trans, map->stripes[i].dev);
		BUG_ON(ret);
	}
	ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
			       chunk_offset);

	BUG_ON(ret);

	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
		BUG_ON(ret);
	}

	spin_lock(&em_tree->lock);
	remove_extent_mapping(em_tree, em);
	kfree(map);
	em->bdev = NULL;

	/* once for the tree */
	free_extent_map(em);
	spin_unlock(&em_tree->lock);

	/* once for us */
	free_extent_map(em);

	btrfs_end_transaction(trans, root);
	return 0;
}

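/*
 * div_factor() returns num * factor / 10, so div_factor(x, 1) is roughly
 * 10% of x (e.g. div_factor(100GB, 1) == 10GB).  It is used below to cap
 * chunk sizes and to pick how much to shave off each device during
 * balance.
 */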
static u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	do_div(num, 10);
	return num;
}

int btrfs_balance(struct btrfs_root *dev_root)
{
	int ret;
	struct list_head *cur;
	struct list_head *devices = &dev_root->fs_info->fs_devices->devices;
	struct btrfs_device *device;
	u64 old_size;
	u64 size_to_free;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_root *chunk_root = dev_root->fs_info->chunk_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_key found_key;

	dev_root = dev_root->fs_info->dev_root;

	mutex_lock(&dev_root->fs_info->fs_mutex);
	/* step one make some room on all the devices */
	list_for_each(cur, devices) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		old_size = device->total_bytes;
		size_to_free = div_factor(old_size, 1);
		size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
		if (device->total_bytes - device->bytes_used > size_to_free)
			continue;

		ret = btrfs_shrink_device(device, old_size - size_to_free);
		BUG_ON(ret);

		trans = btrfs_start_transaction(dev_root, 1);
		BUG_ON(!trans);

		ret = btrfs_grow_device(trans, device, old_size);
		BUG_ON(ret);

		btrfs_end_transaction(trans, dev_root);
	}

	/* step two, relocate all the chunks */
	path = btrfs_alloc_path();
	BUG_ON(!path);

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while(1) {
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0)
			goto error;

		/*
		 * this shouldn't happen, it means the last relocate
		 * failed
		 */
		if (ret == 0)
			break;

		ret = btrfs_previous_item(chunk_root, path, 0,
					  BTRFS_CHUNK_ITEM_KEY);
		if (ret) {
			break;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != key.objectid)
			break;
		chunk = btrfs_item_ptr(path->nodes[0],
				       path->slots[0],
				       struct btrfs_chunk);
		key.offset = found_key.offset;
		/* chunk zero is special */
		if (key.offset == 0)
			break;

		ret = btrfs_relocate_chunk(chunk_root,
					   chunk_root->root_key.objectid,
					   found_key.objectid,
					   found_key.offset);
		BUG_ON(ret);
		btrfs_release_path(chunk_root, path);
	}
	ret = 0;
error:
	btrfs_free_path(path);
	mutex_unlock(&dev_root->fs_info->fs_mutex);
	return ret;
}

/*
 * shrinking a device means finding all of the device extents past
 * the new size, and then following the back refs to the chunks.
 * The chunk relocation code actually frees the device extent
 */
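/*
 * The loop below walks the device's DEV_EXTENT items from the end of the
 * device backwards; any extent whose end is past new_size has its owning
 * chunk relocated (which also frees the device extent), and the walk
 * repeats until everything left fits under new_size.  The in-memory and
 * on-disk totals are trimmed first.
 */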
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	u64 length;
	u64 chunk_tree;
	u64 chunk_objectid;
	u64 chunk_offset;
	int ret;
	int slot;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 diff = device->total_bytes - new_size;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 1);
	if (!trans) {
		ret = -ENOMEM;
		goto done;
	}

	path->reada = 2;

	device->total_bytes = new_size;
	ret = btrfs_update_device(trans, device);
	if (ret) {
		btrfs_end_transaction(trans, root);
		goto done;
	}
	WARN_ON(diff > old_total);
	btrfs_set_super_total_bytes(super_copy, old_total - diff);
	btrfs_end_transaction(trans, root);

	key.objectid = device->devid;
	key.offset = (u64)-1;
	key.type = BTRFS_DEV_EXTENT_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto done;

		ret = btrfs_previous_item(root, path, 0, key.type);
		if (ret < 0)
			goto done;
		if (ret) {
			ret = 0;
			goto done;
		}

		l = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(l, &key, path->slots[0]);

		if (key.objectid != device->devid)
			goto done;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (key.offset + length <= new_size)
			goto done;

		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
		btrfs_release_path(root, path);

		ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
					   chunk_offset);
		if (ret)
			goto done;
	}

done:
	btrfs_free_path(path);
	return ret;
}

int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_key *key,
			   struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct btrfs_disk_key disk_key;
	u32 array_size;
	u8 *ptr;

	array_size = btrfs_super_sys_array_size(super_copy);
	if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
		return -EFBIG;

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
	return 0;
}

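/*
 * chunk_bytes_by_type() converts the per-device stripe size into the
 * logical length of the chunk.  With calc_size == 1GB (illustrative):
 * RAID1/DUP chunks map 1GB of logical space, a 4-stripe RAID10 chunk
 * maps 2GB (num_stripes / sub_stripes == 2), and a 4-stripe RAID0 chunk
 * maps 4GB.
 */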
static u64 chunk_bytes_by_type(u64 type, u64 calc_size, int num_stripes,
			       int sub_stripes)
{
	if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
		return calc_size;
	else if (type & BTRFS_BLOCK_GROUP_RAID10)
		return calc_size * (num_stripes / sub_stripes);
	else
		return calc_size * num_stripes;
}

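/*
 * Sizing sketch for btrfs_alloc_chunk() below (illustrative numbers,
 * ignoring the 10%-of-filesystem cap): a data chunk starts from
 * calc_size == 1GB per stripe and max_chunk_size == 10GB.  On a 4-device
 * RAID0 filesystem 1GB * 4 fits under 10GB, so each device contributes a
 * 1GB stripe and the chunk covers 4GB of logical space; with 16 devices
 * calc_size is trimmed to 10GB / 16 == 640MB per stripe.  Stripes are
 * always rounded down to a multiple of stripe_len (64K) and kept above
 * min_stripe_size.
 */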
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
		      struct btrfs_root *extent_root, u64 *start,
		      u64 *num_bytes, u64 type)
{
	u64 dev_offset;
	struct btrfs_fs_info *info = extent_root->fs_info;
	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
	struct btrfs_path *path;
	struct btrfs_stripe *stripes;
	struct btrfs_device *device = NULL;
	struct btrfs_chunk *chunk;
	struct list_head private_devs;
	struct list_head *dev_list;
	struct list_head *cur;
	struct extent_map_tree *em_tree;
	struct map_lookup *map;
	struct extent_map *em;
	int min_stripe_size = 1 * 1024 * 1024;
	u64 physical;
	u64 calc_size = 1024 * 1024 * 1024;
	u64 max_chunk_size = calc_size;
	u64 min_free;
	u64 avail;
	u64 max_avail = 0;
	u64 percent_max;
	int num_stripes = 1;
	int min_stripes = 1;
	int sub_stripes = 0;
	int looped = 0;
	int ret;
	int index;
	int stripe_len = 64 * 1024;
	struct btrfs_key key;

	if ((type & BTRFS_BLOCK_GROUP_RAID1) &&
	    (type & BTRFS_BLOCK_GROUP_DUP)) {
		WARN_ON(1);
		type &= ~BTRFS_BLOCK_GROUP_DUP;
	}
	dev_list = &extent_root->fs_info->fs_devices->alloc_list;
	if (list_empty(dev_list))
		return -ENOSPC;

	if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
		num_stripes = btrfs_super_num_devices(&info->super_copy);
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_DUP)) {
		num_stripes = 2;
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
		num_stripes = min_t(u64, 2,
				    btrfs_super_num_devices(&info->super_copy));
		if (num_stripes < 2)
			return -ENOSPC;
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
		num_stripes = btrfs_super_num_devices(&info->super_copy);
		if (num_stripes < 4)
			return -ENOSPC;
		num_stripes &= ~(u32)1;
		sub_stripes = 2;
		min_stripes = 4;
	}

	if (type & BTRFS_BLOCK_GROUP_DATA) {
		max_chunk_size = 10 * calc_size;
		min_stripe_size = 64 * 1024 * 1024;
	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
		max_chunk_size = 4 * calc_size;
		min_stripe_size = 32 * 1024 * 1024;
	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		calc_size = 8 * 1024 * 1024;
		max_chunk_size = calc_size * 2;
		min_stripe_size = 1 * 1024 * 1024;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* we don't want a chunk larger than 10% of the FS */
	percent_max = div_factor(btrfs_super_total_bytes(&info->super_copy), 1);
	max_chunk_size = min(percent_max, max_chunk_size);

again:
	if (calc_size * num_stripes > max_chunk_size) {
		calc_size = max_chunk_size;
		do_div(calc_size, num_stripes);
		do_div(calc_size, stripe_len);
		calc_size *= stripe_len;
	}
	/* we don't want tiny stripes */
	calc_size = max_t(u64, min_stripe_size, calc_size);

	do_div(calc_size, stripe_len);
	calc_size *= stripe_len;

	INIT_LIST_HEAD(&private_devs);
	cur = dev_list->next;
	index = 0;

	if (type & BTRFS_BLOCK_GROUP_DUP)
		min_free = calc_size * 2;
	else
		min_free = calc_size;

	/* we add 1MB because we never use the first 1MB of the device */
	min_free += 1024 * 1024;

	/* build a private list of devices we will allocate from */
	while(index < num_stripes) {
		device = list_entry(cur, struct btrfs_device, dev_alloc_list);

		avail = device->total_bytes - device->bytes_used;
		cur = cur->next;

		if (avail >= min_free) {
			u64 ignored_start = 0;
			ret = find_free_dev_extent(trans, device, path,
						   min_free,
						   &ignored_start);
			if (ret == 0) {
				list_move_tail(&device->dev_alloc_list,
					       &private_devs);
				index++;
				if (type & BTRFS_BLOCK_GROUP_DUP)
					index++;
			}
		} else if (avail > max_avail)
			max_avail = avail;
		if (cur == dev_list)
			break;
	}
	if (index < num_stripes) {
		list_splice(&private_devs, dev_list);
		if (index >= min_stripes) {
			num_stripes = index;
			if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
				num_stripes /= sub_stripes;
				num_stripes *= sub_stripes;
			}
			looped = 1;
			goto again;
		}
		if (!looped && max_avail > 0) {
			looped = 1;
			calc_size = max_avail;
			goto again;
		}
		btrfs_free_path(path);
		return -ENOSPC;
	}
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
			      &key.offset);
	if (ret) {
		btrfs_free_path(path);
		return ret;
	}

	chunk = kmalloc(btrfs_chunk_item_size(num_stripes), GFP_NOFS);
	if (!chunk) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		kfree(chunk);
		btrfs_free_path(path);
		return -ENOMEM;
	}
	btrfs_free_path(path);
	path = NULL;

	stripes = &chunk->stripe;
	*num_bytes = chunk_bytes_by_type(type, calc_size,
					 num_stripes, sub_stripes);

	index = 0;
	while(index < num_stripes) {
		struct btrfs_stripe *stripe;
		BUG_ON(list_empty(&private_devs));
		cur = private_devs.next;
		device = list_entry(cur, struct btrfs_device, dev_alloc_list);

		/* loop over this device again if we're doing a dup group */
		if (!(type & BTRFS_BLOCK_GROUP_DUP) ||
		    (index == num_stripes - 1))
			list_move_tail(&device->dev_alloc_list, dev_list);

		ret = btrfs_alloc_dev_extent(trans, device,
			     info->chunk_root->root_key.objectid,
			     BTRFS_FIRST_CHUNK_TREE_OBJECTID, key.offset,
			     calc_size, &dev_offset);
		BUG_ON(ret);
		device->bytes_used += calc_size;
		ret = btrfs_update_device(trans, device);
		BUG_ON(ret);

		map->stripes[index].dev = device;
		map->stripes[index].physical = dev_offset;
		stripe = stripes + index;
		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		physical = dev_offset;
		index++;
	}
	BUG_ON(!list_empty(&private_devs));

	/* key was set above */
	btrfs_set_stack_chunk_length(chunk, *num_bytes);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, stripe_len);
	btrfs_set_stack_chunk_type(chunk, type);
	btrfs_set_stack_chunk_num_stripes(chunk, num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, sub_stripes);
	map->sector_size = extent_root->sectorsize;
	map->stripe_len = stripe_len;
	map->io_align = stripe_len;
	map->io_width = stripe_len;
	map->type = type;
	map->num_stripes = num_stripes;
	map->sub_stripes = sub_stripes;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
				btrfs_chunk_item_size(num_stripes));
	BUG_ON(ret);
	*start = key.offset;

	em = alloc_extent_map(GFP_NOFS);
	if (!em)
		return -ENOMEM;
	em->bdev = (struct block_device *)map;
	em->start = key.offset;
	em->len = *num_bytes;
	em->block_start = 0;

	if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_add_system_chunk(trans, chunk_root, &key,
				    chunk, btrfs_chunk_item_size(num_stripes));
		BUG_ON(ret);
	}
	kfree(chunk);

	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
	spin_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em);
	spin_unlock(&em_tree->lock);
	BUG_ON(ret);
	free_extent_map(em);
	return ret;
}

void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
{
	extent_map_tree_init(&tree->map_tree, GFP_NOFS);
}

void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
{
	struct extent_map *em;

	while(1) {
		spin_lock(&tree->map_tree.lock);
		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
		if (em)
			remove_extent_mapping(&tree->map_tree, em);
		spin_unlock(&tree->map_tree.lock);
		if (!em)
			break;
		kfree(em->bdev);
		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
}

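/*
 * btrfs_num_copies() reports how many copies of a logical block exist:
 * num_stripes for RAID1/DUP (typically 2 given the allocation code
 * above), the sub_stripes count (2) for RAID10, and 1 for everything
 * else (single, RAID0).
 */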
int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	int ret;

	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, len);
	spin_unlock(&em_tree->lock);
	BUG_ON(!em);

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = (struct map_lookup *)em->bdev;
	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
		ret = map->num_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		ret = map->sub_stripes;
	else
		ret = 1;
	free_extent_map(em);
	return ret;
}

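/*
 * Mapping math used below (illustrative): with stripe_len == 64K, a
 * logical address 200K into the chunk gives stripe_nr == 3 and
 * stripe_offset == 8K, and for striped profiles *length is clamped so a
 * single bio never crosses the 64K stripe boundary.
 */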
Chris Masonf2d8d742008-04-21 10:03:05 -04001608static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
1609 u64 logical, u64 *length,
1610 struct btrfs_multi_bio **multi_ret,
1611 int mirror_num, struct page *unplug_page)
Chris Mason0b86a832008-03-24 15:01:56 -04001612{
1613 struct extent_map *em;
1614 struct map_lookup *map;
1615 struct extent_map_tree *em_tree = &map_tree->map_tree;
1616 u64 offset;
Chris Mason593060d2008-03-25 16:50:33 -04001617 u64 stripe_offset;
1618 u64 stripe_nr;
Chris Masoncea9e442008-04-09 16:28:12 -04001619 int stripes_allocated = 8;
Chris Mason321aecc2008-04-16 10:49:51 -04001620 int stripes_required = 1;
Chris Mason593060d2008-03-25 16:50:33 -04001621 int stripe_index;
Chris Masoncea9e442008-04-09 16:28:12 -04001622 int i;
Chris Masonf2d8d742008-04-21 10:03:05 -04001623 int num_stripes;
Chris Masona236aed2008-04-29 09:38:00 -04001624 int max_errors = 0;
Chris Masoncea9e442008-04-09 16:28:12 -04001625 struct btrfs_multi_bio *multi = NULL;
Chris Mason0b86a832008-03-24 15:01:56 -04001626
Chris Masoncea9e442008-04-09 16:28:12 -04001627 if (multi_ret && !(rw & (1 << BIO_RW))) {
1628 stripes_allocated = 1;
1629 }
1630again:
1631 if (multi_ret) {
1632 multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
1633 GFP_NOFS);
1634 if (!multi)
1635 return -ENOMEM;
Chris Masona236aed2008-04-29 09:38:00 -04001636
1637 atomic_set(&multi->error, 0);
Chris Masoncea9e442008-04-09 16:28:12 -04001638 }
Chris Mason0b86a832008-03-24 15:01:56 -04001639
1640 spin_lock(&em_tree->lock);
1641 em = lookup_extent_mapping(em_tree, logical, *length);
Chris Masonb248a412008-04-14 09:48:18 -04001642 spin_unlock(&em_tree->lock);
Chris Masonf2d8d742008-04-21 10:03:05 -04001643
1644 if (!em && unplug_page)
1645 return 0;
1646
Chris Mason3b951512008-04-17 11:29:12 -04001647 if (!em) {
Chris Masona061fc82008-05-07 11:43:44 -04001648 printk("unable to find logical %Lu len %Lu\n", logical, *length);
Chris Masonf2d8d742008-04-21 10:03:05 -04001649 BUG();
Chris Mason3b951512008-04-17 11:29:12 -04001650 }
Chris Mason0b86a832008-03-24 15:01:56 -04001651
1652 BUG_ON(em->start > logical || em->start + em->len < logical);
1653 map = (struct map_lookup *)em->bdev;
1654 offset = logical - em->start;
Chris Mason593060d2008-03-25 16:50:33 -04001655
Chris Masonf1885912008-04-09 16:28:12 -04001656 if (mirror_num > map->num_stripes)
1657 mirror_num = 0;
1658
Chris Masoncea9e442008-04-09 16:28:12 -04001659 /* if our multi bio struct is too small, back off and try again */
Chris Mason321aecc2008-04-16 10:49:51 -04001660 if (rw & (1 << BIO_RW)) {
1661 if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
1662 BTRFS_BLOCK_GROUP_DUP)) {
1663 stripes_required = map->num_stripes;
Chris Masona236aed2008-04-29 09:38:00 -04001664 max_errors = 1;
Chris Mason321aecc2008-04-16 10:49:51 -04001665 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
1666 stripes_required = map->sub_stripes;
Chris Masona236aed2008-04-29 09:38:00 -04001667 max_errors = 1;
Chris Mason321aecc2008-04-16 10:49:51 -04001668 }
1669 }
1670 if (multi_ret && rw == WRITE &&
1671 stripes_allocated < stripes_required) {
Chris Masoncea9e442008-04-09 16:28:12 -04001672 stripes_allocated = map->num_stripes;
Chris Masoncea9e442008-04-09 16:28:12 -04001673 free_extent_map(em);
1674 kfree(multi);
1675 goto again;
1676 }
Chris Mason593060d2008-03-25 16:50:33 -04001677 stripe_nr = offset;
1678 /*
1679 * stripe_nr counts the total number of stripes we have to stride
1680 * to get to this block
1681 */
1682 do_div(stripe_nr, map->stripe_len);
1683
1684 stripe_offset = stripe_nr * map->stripe_len;
1685 BUG_ON(offset < stripe_offset);
1686
1687 	/* stripe_offset is the offset of this block in its stripe */
1688 stripe_offset = offset - stripe_offset;
1689
Chris Masoncea9e442008-04-09 16:28:12 -04001690 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
Chris Mason321aecc2008-04-16 10:49:51 -04001691 BTRFS_BLOCK_GROUP_RAID10 |
Chris Masoncea9e442008-04-09 16:28:12 -04001692 BTRFS_BLOCK_GROUP_DUP)) {
1693 /* we limit the length of each bio to what fits in a stripe */
1694 *length = min_t(u64, em->len - offset,
1695 map->stripe_len - stripe_offset);
1696 } else {
1697 *length = em->len - offset;
1698 }
Chris Masonf2d8d742008-04-21 10:03:05 -04001699
1700 if (!multi_ret && !unplug_page)
Chris Masoncea9e442008-04-09 16:28:12 -04001701 goto out;
1702
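	/*
	 * pick the stripes this I/O touches: all mirrors for a write,
	 * a single copy for a read
	 */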
Chris Masonf2d8d742008-04-21 10:03:05 -04001703 num_stripes = 1;
Chris Masoncea9e442008-04-09 16:28:12 -04001704 stripe_index = 0;
Chris Mason8790d502008-04-03 16:29:03 -04001705 if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
Chris Masonf2d8d742008-04-21 10:03:05 -04001706 if (unplug_page || (rw & (1 << BIO_RW)))
1707 num_stripes = map->num_stripes;
Chris Mason2fff7342008-04-29 14:12:09 -04001708 else if (mirror_num)
Chris Masonf1885912008-04-09 16:28:12 -04001709 stripe_index = mirror_num - 1;
Chris Mason2fff7342008-04-29 14:12:09 -04001710 else
1711 stripe_index = current->pid % map->num_stripes;
1712
Chris Mason611f0e02008-04-03 16:29:03 -04001713 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
Chris Masoncea9e442008-04-09 16:28:12 -04001714 if (rw & (1 << BIO_RW))
Chris Masonf2d8d742008-04-21 10:03:05 -04001715 num_stripes = map->num_stripes;
Chris Masonf1885912008-04-09 16:28:12 -04001716 else if (mirror_num)
1717 stripe_index = mirror_num - 1;
Chris Mason2fff7342008-04-29 14:12:09 -04001718
Chris Mason321aecc2008-04-16 10:49:51 -04001719 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
1720 int factor = map->num_stripes / map->sub_stripes;
Chris Mason321aecc2008-04-16 10:49:51 -04001721
1722 stripe_index = do_div(stripe_nr, factor);
1723 stripe_index *= map->sub_stripes;
1724
Chris Masonf2d8d742008-04-21 10:03:05 -04001725 if (unplug_page || (rw & (1 << BIO_RW)))
1726 num_stripes = map->sub_stripes;
Chris Mason321aecc2008-04-16 10:49:51 -04001727 else if (mirror_num)
1728 stripe_index += mirror_num - 1;
Chris Mason2fff7342008-04-29 14:12:09 -04001729 else
1730 stripe_index += current->pid % map->sub_stripes;
Chris Mason8790d502008-04-03 16:29:03 -04001731 } else {
1732 /*
1733 * after this do_div call, stripe_nr is the number of stripes
1734 * on this device we have to walk to find the data, and
1735 * stripe_index is the number of our device in the stripe array
1736 */
1737 stripe_index = do_div(stripe_nr, map->num_stripes);
1738 }
Chris Mason593060d2008-03-25 16:50:33 -04001739 BUG_ON(stripe_index >= map->num_stripes);
Chris Mason593060d2008-03-25 16:50:33 -04001740
Chris Masonf2d8d742008-04-21 10:03:05 -04001741 for (i = 0; i < num_stripes; i++) {
1742 if (unplug_page) {
1743 struct btrfs_device *device;
1744 struct backing_dev_info *bdi;
1745
1746 device = map->stripes[stripe_index].dev;
1747 bdi = blk_get_backing_dev_info(device->bdev);
1748 if (bdi->unplug_io_fn) {
1749 bdi->unplug_io_fn(bdi, unplug_page);
1750 }
1751 } else {
1752 multi->stripes[i].physical =
1753 map->stripes[stripe_index].physical +
1754 stripe_offset + stripe_nr * map->stripe_len;
1755 multi->stripes[i].dev = map->stripes[stripe_index].dev;
1756 }
Chris Masoncea9e442008-04-09 16:28:12 -04001757 stripe_index++;
Chris Mason593060d2008-03-25 16:50:33 -04001758 }
Chris Masonf2d8d742008-04-21 10:03:05 -04001759 if (multi_ret) {
1760 *multi_ret = multi;
1761 multi->num_stripes = num_stripes;
Chris Masona236aed2008-04-29 09:38:00 -04001762 multi->max_errors = max_errors;
Chris Masonf2d8d742008-04-21 10:03:05 -04001763 }
Chris Masoncea9e442008-04-09 16:28:12 -04001764out:
Chris Mason0b86a832008-03-24 15:01:56 -04001765 free_extent_map(em);
Chris Mason0b86a832008-03-24 15:01:56 -04001766 return 0;
1767}
1768
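/*
 * thin wrapper around __btrfs_map_block for the common case with no
 * unplug_page
 */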
Chris Masonf2d8d742008-04-21 10:03:05 -04001769int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
1770 u64 logical, u64 *length,
1771 struct btrfs_multi_bio **multi_ret, int mirror_num)
1772{
1773 return __btrfs_map_block(map_tree, rw, logical, length, multi_ret,
1774 mirror_num, NULL);
1775}
1776
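/* unplug every device that backs a copy of the given page */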
1777int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree,
1778 u64 logical, struct page *page)
1779{
1780 u64 length = PAGE_CACHE_SIZE;
1781 return __btrfs_map_block(map_tree, READ, logical, &length,
1782 NULL, 0, page);
1783}
1784
1785
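/*
 * completion handler shared by the cloned bios of a multi-stripe request.
 * The original bio is completed only after the last clone finishes, and an
 * error is reported upward only if more stripes failed than max_errors
 * allows.
 */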
Chris Mason8790d502008-04-03 16:29:03 -04001786#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1787static void end_bio_multi_stripe(struct bio *bio, int err)
1788#else
1789static int end_bio_multi_stripe(struct bio *bio,
1790 unsigned int bytes_done, int err)
1791#endif
1792{
Chris Masoncea9e442008-04-09 16:28:12 -04001793 struct btrfs_multi_bio *multi = bio->bi_private;
Chris Mason8790d502008-04-03 16:29:03 -04001794
1795#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1796 if (bio->bi_size)
1797 return 1;
1798#endif
1799 if (err)
Chris Masona236aed2008-04-29 09:38:00 -04001800 atomic_inc(&multi->error);
Chris Mason8790d502008-04-03 16:29:03 -04001801
Chris Masoncea9e442008-04-09 16:28:12 -04001802 if (atomic_dec_and_test(&multi->stripes_pending)) {
Chris Mason8790d502008-04-03 16:29:03 -04001803 bio->bi_private = multi->private;
1804 bio->bi_end_io = multi->end_io;
1805
Chris Masona236aed2008-04-29 09:38:00 -04001806 /* only send an error to the higher layers if it is
1807 * beyond the tolerance of the multi-bio
1808 */
1809 if (atomic_read(&multi->error) > multi->max_errors)
1810 err = -EIO;
1811 else
1812 err = 0;
Chris Mason8790d502008-04-03 16:29:03 -04001813 kfree(multi);
1814
Miguel73f61b22008-04-11 15:50:59 -04001815#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1816 bio_endio(bio, bio->bi_size, err);
1817#else
Chris Mason8790d502008-04-03 16:29:03 -04001818 bio_endio(bio, err);
Miguel73f61b22008-04-11 15:50:59 -04001819#endif
Chris Mason8790d502008-04-03 16:29:03 -04001820 } else {
1821 bio_put(bio);
1822 }
1823#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1824 return 0;
1825#endif
1826}
1827
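/*
 * submit a bio to the devices backing its logical range: the range is
 * mapped with btrfs_map_block, the bio is cloned once per stripe and each
 * clone is sent to its device.  end_bio_multi_stripe gathers the
 * completions when more than one device is involved.
 */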
Chris Masonf1885912008-04-09 16:28:12 -04001828int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
1829 int mirror_num)
Chris Mason0b86a832008-03-24 15:01:56 -04001830{
1831 struct btrfs_mapping_tree *map_tree;
1832 struct btrfs_device *dev;
Chris Mason8790d502008-04-03 16:29:03 -04001833 struct bio *first_bio = bio;
Chris Mason0b86a832008-03-24 15:01:56 -04001834 	u64 logical = (u64)bio->bi_sector << 9;
Chris Mason0b86a832008-03-24 15:01:56 -04001835 u64 length = 0;
1836 u64 map_length;
Chris Masoncea9e442008-04-09 16:28:12 -04001837 struct btrfs_multi_bio *multi = NULL;
Chris Mason0b86a832008-03-24 15:01:56 -04001838 int ret;
Chris Mason8790d502008-04-03 16:29:03 -04001839 int dev_nr = 0;
1840 int total_devs = 1;
Chris Mason0b86a832008-03-24 15:01:56 -04001841
Chris Masonf2d8d742008-04-21 10:03:05 -04001842 length = bio->bi_size;
Chris Mason0b86a832008-03-24 15:01:56 -04001843 map_tree = &root->fs_info->mapping_tree;
1844 map_length = length;
Chris Masoncea9e442008-04-09 16:28:12 -04001845
Chris Masonf1885912008-04-09 16:28:12 -04001846 ret = btrfs_map_block(map_tree, rw, logical, &map_length, &multi,
1847 mirror_num);
Chris Masoncea9e442008-04-09 16:28:12 -04001848 BUG_ON(ret);
1849
1850 total_devs = multi->num_stripes;
1851 if (map_length < length) {
1852 		printk("mapping failed: logical %Lu bio len %Lu "
1853 		       "map len %Lu\n", logical, length, map_length);
1854 BUG();
1855 }
1856 multi->end_io = first_bio->bi_end_io;
1857 multi->private = first_bio->bi_private;
1858 atomic_set(&multi->stripes_pending, multi->num_stripes);
1859
Chris Mason8790d502008-04-03 16:29:03 -04001860 while(dev_nr < total_devs) {
Chris Mason8790d502008-04-03 16:29:03 -04001861 if (total_devs > 1) {
Chris Mason8790d502008-04-03 16:29:03 -04001862 if (dev_nr < total_devs - 1) {
1863 bio = bio_clone(first_bio, GFP_NOFS);
1864 BUG_ON(!bio);
1865 } else {
1866 bio = first_bio;
1867 }
1868 bio->bi_private = multi;
1869 bio->bi_end_io = end_bio_multi_stripe;
1870 }
Chris Masoncea9e442008-04-09 16:28:12 -04001871 bio->bi_sector = multi->stripes[dev_nr].physical >> 9;
1872 dev = multi->stripes[dev_nr].dev;
Chris Masone1c4b742008-04-22 13:26:46 -04001873
Chris Mason8790d502008-04-03 16:29:03 -04001874 bio->bi_bdev = dev->bdev;
1875 spin_lock(&dev->io_lock);
1876 dev->total_ios++;
1877 spin_unlock(&dev->io_lock);
1878 submit_bio(rw, bio);
1879 dev_nr++;
Chris Mason239b14b2008-03-24 15:02:07 -04001880 }
Chris Masoncea9e442008-04-09 16:28:12 -04001881 if (total_devs == 1)
1882 kfree(multi);
Chris Mason0b86a832008-03-24 15:01:56 -04001883 return 0;
1884}
1885
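/* look up a btrfs_device of this filesystem by devid and uuid */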
Chris Masona4437552008-04-18 10:29:38 -04001886struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
1887 u8 *uuid)
Chris Mason0b86a832008-03-24 15:01:56 -04001888{
Chris Mason8a4b83c2008-03-24 15:02:07 -04001889 struct list_head *head = &root->fs_info->fs_devices->devices;
Chris Mason0b86a832008-03-24 15:01:56 -04001890
Chris Masona4437552008-04-18 10:29:38 -04001891 return __find_device(head, devid, uuid);
Chris Mason0b86a832008-03-24 15:01:56 -04001892}
1893
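/*
 * turn one on-disk chunk item into an in-memory mapping: build a
 * map_lookup with one entry per stripe, resolve every stripe's device by
 * devid and uuid, and insert the result into the mapping tree.  Chunks
 * that are already mapped are skipped.
 */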
1894static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
1895 struct extent_buffer *leaf,
1896 struct btrfs_chunk *chunk)
1897{
1898 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
1899 struct map_lookup *map;
1900 struct extent_map *em;
1901 u64 logical;
1902 u64 length;
1903 u64 devid;
Chris Masona4437552008-04-18 10:29:38 -04001904 u8 uuid[BTRFS_UUID_SIZE];
Chris Mason593060d2008-03-25 16:50:33 -04001905 int num_stripes;
Chris Mason0b86a832008-03-24 15:01:56 -04001906 int ret;
Chris Mason593060d2008-03-25 16:50:33 -04001907 int i;
Chris Mason0b86a832008-03-24 15:01:56 -04001908
Chris Masone17cade2008-04-15 15:41:47 -04001909 logical = key->offset;
1910 length = btrfs_chunk_length(leaf, chunk);
Chris Masona061fc82008-05-07 11:43:44 -04001911
Chris Mason0b86a832008-03-24 15:01:56 -04001912 spin_lock(&map_tree->map_tree.lock);
1913 em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
Chris Masonb248a412008-04-14 09:48:18 -04001914 spin_unlock(&map_tree->map_tree.lock);
Chris Mason0b86a832008-03-24 15:01:56 -04001915
1916 /* already mapped? */
1917 if (em && em->start <= logical && em->start + em->len > logical) {
1918 free_extent_map(em);
Chris Mason0b86a832008-03-24 15:01:56 -04001919 return 0;
1920 } else if (em) {
1921 free_extent_map(em);
1922 }
Chris Mason0b86a832008-03-24 15:01:56 -04001923
1928 em = alloc_extent_map(GFP_NOFS);
1929 if (!em)
1930 return -ENOMEM;
Chris Mason593060d2008-03-25 16:50:33 -04001931 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
1932 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
Chris Mason0b86a832008-03-24 15:01:56 -04001933 if (!map) {
1934 free_extent_map(em);
1935 return -ENOMEM;
1936 }
1937
1938 em->bdev = (struct block_device *)map;
1939 em->start = logical;
1940 em->len = length;
1941 em->block_start = 0;
1942
Chris Mason593060d2008-03-25 16:50:33 -04001943 map->num_stripes = num_stripes;
1944 map->io_width = btrfs_chunk_io_width(leaf, chunk);
1945 map->io_align = btrfs_chunk_io_align(leaf, chunk);
1946 map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
1947 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
1948 map->type = btrfs_chunk_type(leaf, chunk);
Chris Mason321aecc2008-04-16 10:49:51 -04001949 map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
Chris Mason593060d2008-03-25 16:50:33 -04001950 for (i = 0; i < num_stripes; i++) {
1951 map->stripes[i].physical =
1952 btrfs_stripe_offset_nr(leaf, chunk, i);
1953 devid = btrfs_stripe_devid_nr(leaf, chunk, i);
Chris Masona4437552008-04-18 10:29:38 -04001954 read_extent_buffer(leaf, uuid, (unsigned long)
1955 btrfs_stripe_dev_uuid_nr(chunk, i),
1956 BTRFS_UUID_SIZE);
1957 map->stripes[i].dev = btrfs_find_device(root, devid, uuid);
Chris Mason593060d2008-03-25 16:50:33 -04001958 if (!map->stripes[i].dev) {
1959 kfree(map);
1960 free_extent_map(em);
1961 return -EIO;
1962 }
Chris Mason0b86a832008-03-24 15:01:56 -04001963 }
1964
1965 spin_lock(&map_tree->map_tree.lock);
1966 ret = add_extent_mapping(&map_tree->map_tree, em);
Chris Mason0b86a832008-03-24 15:01:56 -04001967 spin_unlock(&map_tree->map_tree.lock);
Chris Masonb248a412008-04-14 09:48:18 -04001968 BUG_ON(ret);
Chris Mason0b86a832008-03-24 15:01:56 -04001969 free_extent_map(em);
1970
1971 return 0;
1972}
1973
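/* copy the fields of an on-disk device item into an in-memory device */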
1974static int fill_device_from_item(struct extent_buffer *leaf,
1975 struct btrfs_dev_item *dev_item,
1976 struct btrfs_device *device)
1977{
1978 unsigned long ptr;
Chris Mason0b86a832008-03-24 15:01:56 -04001979
1980 device->devid = btrfs_device_id(leaf, dev_item);
1981 device->total_bytes = btrfs_device_total_bytes(leaf, dev_item);
1982 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
1983 device->type = btrfs_device_type(leaf, dev_item);
1984 device->io_align = btrfs_device_io_align(leaf, dev_item);
1985 device->io_width = btrfs_device_io_width(leaf, dev_item);
1986 device->sector_size = btrfs_device_sector_size(leaf, dev_item);
Chris Mason0b86a832008-03-24 15:01:56 -04001987
1988 ptr = (unsigned long)btrfs_device_uuid(dev_item);
Chris Masone17cade2008-04-15 15:41:47 -04001989 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
Chris Mason0b86a832008-03-24 15:01:56 -04001990
Chris Mason0b86a832008-03-24 15:01:56 -04001991 return 0;
1992}
1993
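/*
 * bring one device item from the chunk tree into memory.  If the devid is
 * not already on the fs_devices lists, a new btrfs_device is allocated and
 * added before its fields are filled in from the item.
 */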
Chris Mason0d81ba52008-03-24 15:02:07 -04001994static int read_one_dev(struct btrfs_root *root,
Chris Mason0b86a832008-03-24 15:01:56 -04001995 struct extent_buffer *leaf,
1996 struct btrfs_dev_item *dev_item)
1997{
1998 struct btrfs_device *device;
1999 u64 devid;
2000 int ret;
Chris Masona4437552008-04-18 10:29:38 -04002001 u8 dev_uuid[BTRFS_UUID_SIZE];
2002
Chris Mason0b86a832008-03-24 15:01:56 -04002003 devid = btrfs_device_id(leaf, dev_item);
Chris Masona4437552008-04-18 10:29:38 -04002004 read_extent_buffer(leaf, dev_uuid,
2005 (unsigned long)btrfs_device_uuid(dev_item),
2006 BTRFS_UUID_SIZE);
2007 device = btrfs_find_device(root, devid, dev_uuid);
Chris Mason6324fbf2008-03-24 15:01:59 -04002008 if (!device) {
Chris Mason8a4b83c2008-03-24 15:02:07 -04002009 		printk("warning: devid %Lu not found in the device list\n", devid);
Chris Masonf2984462008-04-10 16:19:33 -04002010 device = kzalloc(sizeof(*device), GFP_NOFS);
Chris Mason6324fbf2008-03-24 15:01:59 -04002011 if (!device)
2012 return -ENOMEM;
Chris Mason8a4b83c2008-03-24 15:02:07 -04002013 list_add(&device->dev_list,
2014 &root->fs_info->fs_devices->devices);
Chris Masonb3075712008-04-22 09:22:07 -04002015 list_add(&device->dev_alloc_list,
2016 &root->fs_info->fs_devices->alloc_list);
Chris Masonb248a412008-04-14 09:48:18 -04002017 device->barriers = 1;
Chris Mason8790d502008-04-03 16:29:03 -04002018 spin_lock_init(&device->io_lock);
Chris Mason6324fbf2008-03-24 15:01:59 -04002019 }
Chris Mason0b86a832008-03-24 15:01:56 -04002020
2021 fill_device_from_item(leaf, dev_item, device);
2022 device->dev_root = root->fs_info->dev_root;
Chris Mason0b86a832008-03-24 15:01:56 -04002023 ret = 0;
2024#if 0
2025 ret = btrfs_open_device(device);
2026 if (ret) {
2027 kfree(device);
2028 }
2029#endif
2030 return ret;
2031}
2032
Chris Mason0d81ba52008-03-24 15:02:07 -04002033int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf)
2034{
2035 struct btrfs_dev_item *dev_item;
2036
2037 dev_item = (struct btrfs_dev_item *)offsetof(struct btrfs_super_block,
2038 dev_item);
2039 return read_one_dev(root, buf, dev_item);
2040}
2041
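/*
 * the superblock embeds a small sys_chunk_array holding the chunks needed
 * to bootstrap the chunk tree.  Walk its (disk_key, chunk item) pairs and
 * map each one through read_one_chunk.
 */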
Chris Mason0b86a832008-03-24 15:01:56 -04002042int btrfs_read_sys_array(struct btrfs_root *root)
2043{
2044 struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
Chris Masona061fc82008-05-07 11:43:44 -04002045 struct extent_buffer *sb;
Chris Mason0b86a832008-03-24 15:01:56 -04002046 struct btrfs_disk_key *disk_key;
Chris Mason0b86a832008-03-24 15:01:56 -04002047 struct btrfs_chunk *chunk;
Chris Mason84eed902008-04-25 09:04:37 -04002048 u8 *ptr;
2049 unsigned long sb_ptr;
2050 int ret = 0;
Chris Mason0b86a832008-03-24 15:01:56 -04002051 u32 num_stripes;
2052 u32 array_size;
2053 u32 len = 0;
Chris Mason0b86a832008-03-24 15:01:56 -04002054 u32 cur;
Chris Mason84eed902008-04-25 09:04:37 -04002055 struct btrfs_key key;
Chris Mason0b86a832008-03-24 15:01:56 -04002056
Chris Masona061fc82008-05-07 11:43:44 -04002057 sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
2058 BTRFS_SUPER_INFO_SIZE);
2059 if (!sb)
2060 return -ENOMEM;
2061 btrfs_set_buffer_uptodate(sb);
2062 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
Chris Mason0b86a832008-03-24 15:01:56 -04002063 array_size = btrfs_super_sys_array_size(super_copy);
2064
Chris Mason0b86a832008-03-24 15:01:56 -04002065 ptr = super_copy->sys_chunk_array;
2066 sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
2067 cur = 0;
2068
2069 while (cur < array_size) {
2070 disk_key = (struct btrfs_disk_key *)ptr;
2071 btrfs_disk_key_to_cpu(&key, disk_key);
2072
Chris Masona061fc82008-05-07 11:43:44 -04002073 		len = sizeof(*disk_key);
		ptr += len;
Chris Mason0b86a832008-03-24 15:01:56 -04002074 sb_ptr += len;
2075 cur += len;
2076
Chris Mason0d81ba52008-03-24 15:02:07 -04002077 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
Chris Mason0b86a832008-03-24 15:01:56 -04002078 chunk = (struct btrfs_chunk *)sb_ptr;
Chris Mason0d81ba52008-03-24 15:02:07 -04002079 ret = read_one_chunk(root, &key, sb, chunk);
Chris Mason84eed902008-04-25 09:04:37 -04002080 if (ret)
2081 break;
Chris Mason0b86a832008-03-24 15:01:56 -04002082 num_stripes = btrfs_chunk_num_stripes(sb, chunk);
2083 len = btrfs_chunk_item_size(num_stripes);
2084 } else {
Chris Mason84eed902008-04-25 09:04:37 -04002085 ret = -EIO;
2086 break;
Chris Mason0b86a832008-03-24 15:01:56 -04002087 }
2088 ptr += len;
2089 sb_ptr += len;
2090 cur += len;
2091 }
Chris Masona061fc82008-05-07 11:43:44 -04002092 free_extent_buffer(sb);
Chris Mason84eed902008-04-25 09:04:37 -04002093 return ret;
Chris Mason0b86a832008-03-24 15:01:56 -04002094}
2095
2096int btrfs_read_chunk_tree(struct btrfs_root *root)
2097{
2098 struct btrfs_path *path;
2099 struct extent_buffer *leaf;
2100 struct btrfs_key key;
2101 struct btrfs_key found_key;
2102 int ret;
2103 int slot;
2104
2105 root = root->fs_info->chunk_root;
2106
2107 path = btrfs_alloc_path();
2108 if (!path)
2109 return -ENOMEM;
2110
2111 /* first we search for all of the device items, and then we
2112 * read in all of the chunk items. This way we can create chunk
2113 	 * mappings that reference all of the devices that are found
2114 */
2115 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2116 key.offset = 0;
2117 key.type = 0;
2118again:
2119 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2120 while(1) {
2121 leaf = path->nodes[0];
2122 slot = path->slots[0];
2123 if (slot >= btrfs_header_nritems(leaf)) {
2124 ret = btrfs_next_leaf(root, path);
2125 if (ret == 0)
2126 continue;
2127 if (ret < 0)
2128 goto error;
2129 break;
2130 }
2131 btrfs_item_key_to_cpu(leaf, &found_key, slot);
2132 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
2133 if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
2134 break;
2135 if (found_key.type == BTRFS_DEV_ITEM_KEY) {
2136 struct btrfs_dev_item *dev_item;
2137 dev_item = btrfs_item_ptr(leaf, slot,
2138 struct btrfs_dev_item);
Chris Mason0d81ba52008-03-24 15:02:07 -04002139 ret = read_one_dev(root, leaf, dev_item);
Chris Mason0b86a832008-03-24 15:01:56 -04002140 BUG_ON(ret);
2141 }
2142 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
2143 struct btrfs_chunk *chunk;
2144 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
2145 ret = read_one_chunk(root, &found_key, leaf, chunk);
2146 }
2147 path->slots[0]++;
2148 }
2149 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
2150 key.objectid = 0;
2151 btrfs_release_path(root, path);
2152 goto again;
2153 }
2154
2155 btrfs_free_path(path);
2156 ret = 0;
2157error:
2158 return ret;
2159}
2160