/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "ordered-data.h"
#include "transaction.h"
#include "backref.h"
#include "extent_io.h"
#include "check-integrity.h"

/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extents and super blocks and verifies the checksums. In case a bad checksum
 * is found or the extent cannot be read, good data will be written back if
 * any can be found.
 *
 * Future enhancements:
 *  - In case an unrepairable extent is encountered, track which files are
 *    affected and report them
 *  - track and record media errors, throw out bad devices
 *  - add a mode to also read unallocated space
 */

struct scrub_block;
struct scrub_dev;

#define SCRUB_PAGES_PER_BIO		16	/* 64k per bio */
#define SCRUB_BIOS_PER_DEV		16	/* 1 MB per device in flight */
#define SCRUB_MAX_PAGES_PER_BLOCK	16	/* 64k per node/leaf/sector */

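/*
 * A scrub_page describes one page of an extent: where it lives on disk
 * (bdev + physical), its logical address and the checksum expected for it.
 * Pages are grouped into a scrub_block, which represents a single
 * node/leaf/data sector and carries the per-block verification results.
 * A scrub_bio batches up to SCRUB_PAGES_PER_BIO pages for submission to
 * one device.
 */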
struct scrub_page {
        struct scrub_block      *sblock;
        struct page             *page;
        struct block_device     *bdev;
        u64                     flags;  /* extent flags */
        u64                     generation;
        u64                     logical;
        u64                     physical;
        struct {
                unsigned int    mirror_num:8;
                unsigned int    have_csum:1;
                unsigned int    io_error:1;
        };
        u8                      csum[BTRFS_CSUM_SIZE];
};

struct scrub_bio {
        int                     index;
        struct scrub_dev        *sdev;
        struct bio              *bio;
        int                     err;
        u64                     logical;
        u64                     physical;
        struct scrub_page       *pagev[SCRUB_PAGES_PER_BIO];
        int                     page_count;
        int                     next_free;
        struct btrfs_work       work;
};

struct scrub_block {
        struct scrub_page       pagev[SCRUB_MAX_PAGES_PER_BLOCK];
        int                     page_count;
        atomic_t                outstanding_pages;
        atomic_t                ref_count; /* free mem on transition to zero */
        struct scrub_dev        *sdev;
        struct {
                unsigned int    header_error:1;
                unsigned int    checksum_error:1;
                unsigned int    no_io_error_seen:1;
        };
};

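/*
 * A scrub_dev is the per-device scrub context: a small pool of
 * SCRUB_BIOS_PER_DEV scrub_bios chained through first_free/next_free,
 * counters for bios in flight and pending nodatasum fixups, the list of
 * data checksums for the range currently being scrubbed, and the scrub
 * statistics.
 */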
struct scrub_dev {
        struct scrub_bio        *bios[SCRUB_BIOS_PER_DEV];
        struct btrfs_device     *dev;
        int                     first_free;
        int                     curr;
        atomic_t                in_flight;
        atomic_t                fixup_cnt;
        spinlock_t              list_lock;
        wait_queue_head_t       list_wait;
        u16                     csum_size;
        struct list_head        csum_list;
        atomic_t                cancel_req;
        int                     readonly;
        int                     pages_per_bio; /* <= SCRUB_PAGES_PER_BIO */
        u32                     sectorsize;
        u32                     nodesize;
        u32                     leafsize;
        /*
         * statistics
         */
        struct btrfs_scrub_progress stat;
        spinlock_t              stat_lock;
};

struct scrub_fixup_nodatasum {
        struct scrub_dev        *sdev;
        u64                     logical;
        struct btrfs_root       *root;
        struct btrfs_work       work;
        int                     mirror_num;
};

struct scrub_warning {
        struct btrfs_path       *path;
        u64                     extent_item_size;
        char                    *scratch_buf;
        char                    *msg_buf;
        const char              *errstr;
        sector_t                sector;
        u64                     logical;
        struct btrfs_device     *dev;
        int                     msg_bufsize;
        int                     scratch_bufsize;
};

static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
static int scrub_setup_recheck_block(struct scrub_dev *sdev,
                                     struct btrfs_mapping_tree *map_tree,
                                     u64 length, u64 logical,
                                     struct scrub_block *sblock);
static int scrub_recheck_block(struct btrfs_fs_info *fs_info,
                               struct scrub_block *sblock, int is_metadata,
                               int have_csum, u8 *csum, u64 generation,
                               u16 csum_size);
static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
                                         struct scrub_block *sblock,
                                         int is_metadata, int have_csum,
                                         const u8 *csum, u64 generation,
                                         u16 csum_size);
static void scrub_complete_bio_end_io(struct bio *bio, int err);
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
                                             struct scrub_block *sblock_good,
                                             int force_write);
static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
                                            struct scrub_block *sblock_good,
                                            int page_num, int force_write);
static int scrub_checksum_data(struct scrub_block *sblock);
static int scrub_checksum_tree_block(struct scrub_block *sblock);
static int scrub_checksum_super(struct scrub_block *sblock);
static void scrub_block_get(struct scrub_block *sblock);
static void scrub_block_put(struct scrub_block *sblock);
static int scrub_add_page_to_bio(struct scrub_dev *sdev,
                                 struct scrub_page *spage);
static int scrub_pages(struct scrub_dev *sdev, u64 logical, u64 len,
                       u64 physical, u64 flags, u64 gen, int mirror_num,
                       u8 *csum, int force);
static void scrub_bio_end_io(struct bio *bio, int err);
static void scrub_bio_end_io_worker(struct btrfs_work *work);
static void scrub_block_complete(struct scrub_block *sblock);

static void scrub_free_csums(struct scrub_dev *sdev)
{
        while (!list_empty(&sdev->csum_list)) {
                struct btrfs_ordered_sum *sum;
                sum = list_first_entry(&sdev->csum_list,
                                       struct btrfs_ordered_sum, list);
                list_del(&sum->list);
                kfree(sum);
        }
}

static noinline_for_stack void scrub_free_dev(struct scrub_dev *sdev)
{
        int i;

        if (!sdev)
                return;

        /* this can happen when scrub is cancelled */
        if (sdev->curr != -1) {
                struct scrub_bio *sbio = sdev->bios[sdev->curr];

                for (i = 0; i < sbio->page_count; i++) {
                        BUG_ON(!sbio->pagev[i]);
                        BUG_ON(!sbio->pagev[i]->page);
                        scrub_block_put(sbio->pagev[i]->sblock);
                }
                bio_put(sbio->bio);
        }

        for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
                struct scrub_bio *sbio = sdev->bios[i];

                if (!sbio)
                        break;
                kfree(sbio);
        }

        scrub_free_csums(sdev);
        kfree(sdev);
}

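/*
 * Allocate and initialize the per-device scrub context: pre-allocate the
 * SCRUB_BIOS_PER_DEV scrub_bios and chain them into a free list, cache the
 * node/leaf/sector sizes and the checksum size, and set up the locks and
 * wait queue used while bios are in flight.
 */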
static noinline_for_stack
struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev)
{
        struct scrub_dev *sdev;
        int             i;
        struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
        int pages_per_bio;

        pages_per_bio = min_t(int, SCRUB_PAGES_PER_BIO,
                              bio_get_nr_vecs(dev->bdev));
        sdev = kzalloc(sizeof(*sdev), GFP_NOFS);
        if (!sdev)
                goto nomem;
        sdev->dev = dev;
        sdev->pages_per_bio = pages_per_bio;
        sdev->curr = -1;
        for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
                struct scrub_bio *sbio;

                sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
                if (!sbio)
                        goto nomem;
                sdev->bios[i] = sbio;

                sbio->index = i;
                sbio->sdev = sdev;
                sbio->page_count = 0;
                sbio->work.func = scrub_bio_end_io_worker;

                if (i != SCRUB_BIOS_PER_DEV-1)
                        sdev->bios[i]->next_free = i + 1;
                else
                        sdev->bios[i]->next_free = -1;
        }
        sdev->first_free = 0;
        sdev->nodesize = dev->dev_root->nodesize;
        sdev->leafsize = dev->dev_root->leafsize;
        sdev->sectorsize = dev->dev_root->sectorsize;
        atomic_set(&sdev->in_flight, 0);
        atomic_set(&sdev->fixup_cnt, 0);
        atomic_set(&sdev->cancel_req, 0);
        sdev->csum_size = btrfs_super_csum_size(fs_info->super_copy);
        INIT_LIST_HEAD(&sdev->csum_list);

        spin_lock_init(&sdev->list_lock);
        spin_lock_init(&sdev->stat_lock);
        init_waitqueue_head(&sdev->list_wait);
        return sdev;

nomem:
        scrub_free_dev(sdev);
        return ERR_PTR(-ENOMEM);
}

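/*
 * Callback for iterate_extent_inodes(): for one inode that references the
 * corrupted extent, look up its inode item, resolve all paths to the inode
 * via an inode_fs_paths and print one warning line per path.
 */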
static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *ctx)
{
        u64 isize;
        u32 nlink;
        int ret;
        int i;
        struct extent_buffer *eb;
        struct btrfs_inode_item *inode_item;
        struct scrub_warning *swarn = ctx;
        struct btrfs_fs_info *fs_info = swarn->dev->dev_root->fs_info;
        struct inode_fs_paths *ipath = NULL;
        struct btrfs_root *local_root;
        struct btrfs_key root_key;

        root_key.objectid = root;
        root_key.type = BTRFS_ROOT_ITEM_KEY;
        root_key.offset = (u64)-1;
        local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
        if (IS_ERR(local_root)) {
                ret = PTR_ERR(local_root);
                goto err;
        }

        ret = inode_item_info(inum, 0, local_root, swarn->path);
        if (ret) {
                btrfs_release_path(swarn->path);
                goto err;
        }

        eb = swarn->path->nodes[0];
        inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
                                    struct btrfs_inode_item);
        isize = btrfs_inode_size(eb, inode_item);
        nlink = btrfs_inode_nlink(eb, inode_item);
        btrfs_release_path(swarn->path);

        ipath = init_ipath(4096, local_root, swarn->path);
        if (IS_ERR(ipath)) {
                ret = PTR_ERR(ipath);
                ipath = NULL;
                goto err;
        }
        ret = paths_from_inode(inum, ipath);

        if (ret < 0)
                goto err;

        /*
         * we deliberately ignore the fact that ipath might have been too
         * small to hold all of the paths here
         */
        for (i = 0; i < ipath->fspath->elem_cnt; ++i)
                printk(KERN_WARNING "btrfs: %s at logical %llu on dev "
                        "%s, sector %llu, root %llu, inode %llu, offset %llu, "
                        "length %llu, links %u (path: %s)\n", swarn->errstr,
                        swarn->logical, swarn->dev->name,
                        (unsigned long long)swarn->sector, root, inum, offset,
                        min(isize - offset, (u64)PAGE_SIZE), nlink,
                        (char *)(unsigned long)ipath->fspath->val[i]);

        free_ipath(ipath);
        return 0;

err:
        printk(KERN_WARNING "btrfs: %s at logical %llu on dev "
                "%s, sector %llu, root %llu, inode %llu, offset %llu: path "
                "resolving failed with ret=%d\n", swarn->errstr,
                swarn->logical, swarn->dev->name,
                (unsigned long long)swarn->sector, root, inum, offset, ret);

        free_ipath(ipath);
        return 0;
}

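/*
 * Look up the extent that covers the corrupted sector and print a warning
 * for each referencer: for tree blocks the owning tree and level of each
 * backref, for data extents one line per affected inode path (via
 * scrub_print_warning_inode).
 */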
static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
{
        struct btrfs_device *dev = sblock->sdev->dev;
        struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
        struct btrfs_path *path;
        struct btrfs_key found_key;
        struct extent_buffer *eb;
        struct btrfs_extent_item *ei;
        struct scrub_warning swarn;
        u32 item_size;
        int ret;
        u64 ref_root;
        u8 ref_level;
        unsigned long ptr = 0;
        const int bufsize = 4096;
        u64 extent_item_pos;

        path = btrfs_alloc_path();

        swarn.scratch_buf = kmalloc(bufsize, GFP_NOFS);
        swarn.msg_buf = kmalloc(bufsize, GFP_NOFS);
        BUG_ON(sblock->page_count < 1);
        swarn.sector = (sblock->pagev[0].physical) >> 9;
        swarn.logical = sblock->pagev[0].logical;
        swarn.errstr = errstr;
        swarn.dev = dev;
        swarn.msg_bufsize = bufsize;
        swarn.scratch_bufsize = bufsize;

        if (!path || !swarn.scratch_buf || !swarn.msg_buf)
                goto out;

        ret = extent_from_logical(fs_info, swarn.logical, path, &found_key);
        if (ret < 0)
                goto out;

        extent_item_pos = swarn.logical - found_key.objectid;
        swarn.extent_item_size = found_key.offset;

        eb = path->nodes[0];
        ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
        item_size = btrfs_item_size_nr(eb, path->slots[0]);
        btrfs_release_path(path);

        if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
                do {
                        ret = tree_backref_for_extent(&ptr, eb, ei, item_size,
                                                      &ref_root, &ref_level);
                        printk(KERN_WARNING
                                "btrfs: %s at logical %llu on dev %s, "
                                "sector %llu: metadata %s (level %d) in tree "
                                "%llu\n", errstr, swarn.logical, dev->name,
                                (unsigned long long)swarn.sector,
                                ref_level ? "node" : "leaf",
                                ret < 0 ? -1 : ref_level,
                                ret < 0 ? -1 : ref_root);
                } while (ret != 1);
        } else {
                swarn.path = path;
                iterate_extent_inodes(fs_info, found_key.objectid,
                                      extent_item_pos, 1,
                                      scrub_print_warning_inode, &swarn);
        }

out:
        btrfs_free_path(path);
        kfree(swarn.scratch_buf);
        kfree(swarn.msg_buf);
}

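/*
 * Callback for iterate_inodes_from_logical(), run from
 * scrub_fixup_nodatasum(): re-read the bad mirror of the page through the
 * regular readpage path (or, if the page is already uptodate and clean,
 * rewrite the sector directly via repair_io_failure()) and check whether
 * the sector could be corrected. Returns 1 once it was corrected so that
 * the iteration stops after the first inode.
 */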
static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *ctx)
{
        struct page *page = NULL;
        unsigned long index;
        struct scrub_fixup_nodatasum *fixup = ctx;
        int ret;
        int corrected = 0;
        struct btrfs_key key;
        struct inode *inode = NULL;
        u64 end = offset + PAGE_SIZE - 1;
        struct btrfs_root *local_root;

        key.objectid = root;
        key.type = BTRFS_ROOT_ITEM_KEY;
        key.offset = (u64)-1;
        local_root = btrfs_read_fs_root_no_name(fixup->root->fs_info, &key);
        if (IS_ERR(local_root))
                return PTR_ERR(local_root);

        key.type = BTRFS_INODE_ITEM_KEY;
        key.objectid = inum;
        key.offset = 0;
        inode = btrfs_iget(fixup->root->fs_info->sb, &key, local_root, NULL);
        if (IS_ERR(inode))
                return PTR_ERR(inode);

        index = offset >> PAGE_CACHE_SHIFT;

        page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
        if (!page) {
                ret = -ENOMEM;
                goto out;
        }

        if (PageUptodate(page)) {
                struct btrfs_mapping_tree *map_tree;
                if (PageDirty(page)) {
                        /*
                         * we need to write the data to the defect sector. the
                         * data that was in that sector is not in memory,
                         * because the page was modified. we must not write the
                         * modified page to that sector.
                         *
                         * TODO: what could be done here: wait for the delalloc
                         *       runner to write out that page (might involve
                         *       COW) and see whether the sector is still
                         *       referenced afterwards.
                         *
                         * For the meantime, we'll treat this error as
                         * uncorrectable, although there is a chance that a
                         * later scrub will find the bad sector again and that
                         * there's no dirty page in memory then.
                         */
                        ret = -EIO;
                        goto out;
                }
                map_tree = &BTRFS_I(inode)->root->fs_info->mapping_tree;
                ret = repair_io_failure(map_tree, offset, PAGE_SIZE,
                                        fixup->logical, page,
                                        fixup->mirror_num);
                unlock_page(page);
                corrected = !ret;
        } else {
                /*
                 * we need to get good data first. the general readpage path
                 * will call repair_io_failure for us, we just have to make
                 * sure we read the bad mirror.
                 */
                ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
                                      EXTENT_DAMAGED, GFP_NOFS);
                if (ret) {
                        /* set_extent_bits should give proper error */
                        WARN_ON(ret > 0);
                        if (ret > 0)
                                ret = -EFAULT;
                        goto out;
                }

                ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
                                            btrfs_get_extent,
                                            fixup->mirror_num);
                wait_on_page_locked(page);

                corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
                                            end, EXTENT_DAMAGED, 0, NULL);
                if (!corrected)
                        clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
                                          EXTENT_DAMAGED, GFP_NOFS);
        }

out:
        if (page)
                put_page(page);
        if (inode)
                iput(inode);

        if (ret < 0)
                return ret;

        if (ret == 0 && corrected) {
                /*
                 * we only need to call readpage for one of the inodes belonging
                 * to this extent. so make iterate_extent_inodes stop
                 */
                return 1;
        }

        return -EIO;
}

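/*
 * Worker queued by scrub_handle_errored_block() for data extents that have
 * no checksum: it joins a transaction and triggers a re-read of the bad
 * copy through scrub_fixup_readpage() for one of the inodes referencing
 * the extent, then updates the corrected/uncorrectable statistics.
 */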
static void scrub_fixup_nodatasum(struct btrfs_work *work)
{
        int ret;
        struct scrub_fixup_nodatasum *fixup;
        struct scrub_dev *sdev;
        struct btrfs_trans_handle *trans = NULL;
        struct btrfs_fs_info *fs_info;
        struct btrfs_path *path;
        int uncorrectable = 0;

        fixup = container_of(work, struct scrub_fixup_nodatasum, work);
        sdev = fixup->sdev;
        fs_info = fixup->root->fs_info;

        path = btrfs_alloc_path();
        if (!path) {
                spin_lock(&sdev->stat_lock);
                ++sdev->stat.malloc_errors;
                spin_unlock(&sdev->stat_lock);
                uncorrectable = 1;
                goto out;
        }

        trans = btrfs_join_transaction(fixup->root);
        if (IS_ERR(trans)) {
                uncorrectable = 1;
                goto out;
        }

        /*
         * the idea is to trigger a regular read through the standard path. we
         * read a page from the (failed) logical address by specifying the
         * corresponding copy number of the failed sector. thus, that readpage
         * is expected to fail.
         * that is the point where on-the-fly error correction will kick in
         * (once it's finished) and rewrite the failed sector if a good copy
         * can be found.
         */
        ret = iterate_inodes_from_logical(fixup->logical, fixup->root->fs_info,
                                          path, scrub_fixup_readpage,
                                          fixup);
        if (ret < 0) {
                uncorrectable = 1;
                goto out;
        }
        WARN_ON(ret != 1);

        spin_lock(&sdev->stat_lock);
        ++sdev->stat.corrected_errors;
        spin_unlock(&sdev->stat_lock);

out:
        if (trans && !IS_ERR(trans))
                btrfs_end_transaction(trans, fixup->root);
        if (uncorrectable) {
                spin_lock(&sdev->stat_lock);
                ++sdev->stat.uncorrectable_errors;
                spin_unlock(&sdev->stat_lock);
                printk_ratelimited(KERN_ERR
                        "btrfs: unable to fixup (nodatasum) error at logical %llu on dev %s\n",
                        (unsigned long long)fixup->logical, sdev->dev->name);
        }

        btrfs_free_path(path);
        kfree(fixup);

        /* see caller why we're pretending to be paused in the scrub counters */
        mutex_lock(&fs_info->scrub_lock);
        atomic_dec(&fs_info->scrubs_running);
        atomic_dec(&fs_info->scrubs_paused);
        mutex_unlock(&fs_info->scrub_lock);
        atomic_dec(&sdev->fixup_cnt);
        wake_up(&fs_info->scrub_pause_wait);
        wake_up(&sdev->list_wait);
}

/*
 * scrub_handle_errored_block gets called when either verification of the
 * pages failed or the bio failed to read, e.g. with EIO. In the latter
 * case, this function handles all pages in the bio, even though only one
 * may be bad.
 * The goal of this function is to repair the errored block by using the
 * contents of one of the mirrors.
 */
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
{
        struct scrub_dev *sdev = sblock_to_check->sdev;
        struct btrfs_fs_info *fs_info;
        u64 length;
        u64 logical;
        u64 generation;
        unsigned int failed_mirror_index;
        unsigned int is_metadata;
        unsigned int have_csum;
        u8 *csum;
        struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
        struct scrub_block *sblock_bad;
        int ret;
        int mirror_index;
        int page_num;
        int success;
        static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
                                      DEFAULT_RATELIMIT_BURST);

        BUG_ON(sblock_to_check->page_count < 1);
        fs_info = sdev->dev->dev_root->fs_info;
        length = sblock_to_check->page_count * PAGE_SIZE;
        logical = sblock_to_check->pagev[0].logical;
        generation = sblock_to_check->pagev[0].generation;
        BUG_ON(sblock_to_check->pagev[0].mirror_num < 1);
        failed_mirror_index = sblock_to_check->pagev[0].mirror_num - 1;
        is_metadata = !(sblock_to_check->pagev[0].flags &
                        BTRFS_EXTENT_FLAG_DATA);
        have_csum = sblock_to_check->pagev[0].have_csum;
        csum = sblock_to_check->pagev[0].csum;

        /*
         * read all mirrors one after the other. This includes re-reading the
         * extent or metadata block that failed (that was the cause that this
         * fixup code is called) another time, page by page this time in order
         * to know which pages caused I/O errors and which ones are good
         * (for all mirrors).
         * It is the goal to handle the situation when more than one
         * mirror contains I/O errors, but the errors do not
         * overlap, i.e. the data can be repaired by selecting the
         * pages from those mirrors without I/O error on the
         * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
         * would be that mirror #1 has an I/O error on the first page,
         * the second page is good, and mirror #2 has an I/O error on
         * the second page, but the first page is good.
         * Then the first page of the first mirror can be repaired by
         * taking the first page of the second mirror, and the
         * second page of the second mirror can be repaired by
         * copying the contents of the 2nd page of the 1st mirror.
         * One more note: if the pages of one mirror contain I/O
         * errors, the checksum cannot be verified. In order to get
         * the best data for repairing, the first attempt is to find
         * a mirror without I/O errors and with a validated checksum.
         * Only if this is not possible, the pages are picked from
         * mirrors with I/O errors without considering the checksum.
         * If the latter is the case, at the end, the checksum of the
         * repaired area is verified in order to correctly maintain
         * the statistics.
         */

        sblocks_for_recheck = kzalloc(BTRFS_MAX_MIRRORS *
                                      sizeof(*sblocks_for_recheck),
                                      GFP_NOFS);
        if (!sblocks_for_recheck) {
                spin_lock(&sdev->stat_lock);
                sdev->stat.malloc_errors++;
                sdev->stat.read_errors++;
                sdev->stat.uncorrectable_errors++;
                spin_unlock(&sdev->stat_lock);
                goto out;
        }

        /* setup the context, map the logical blocks and alloc the pages */
        ret = scrub_setup_recheck_block(sdev, &fs_info->mapping_tree, length,
                                        logical, sblocks_for_recheck);
        if (ret) {
                spin_lock(&sdev->stat_lock);
                sdev->stat.read_errors++;
                sdev->stat.uncorrectable_errors++;
                spin_unlock(&sdev->stat_lock);
                goto out;
        }
        BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
        sblock_bad = sblocks_for_recheck + failed_mirror_index;

        /* build and submit the bios for the failed mirror, check checksums */
        ret = scrub_recheck_block(fs_info, sblock_bad, is_metadata, have_csum,
                                  csum, generation, sdev->csum_size);
        if (ret) {
                spin_lock(&sdev->stat_lock);
                sdev->stat.read_errors++;
                sdev->stat.uncorrectable_errors++;
                spin_unlock(&sdev->stat_lock);
                goto out;
        }

        if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
            sblock_bad->no_io_error_seen) {
                /*
                 * the error disappeared after reading page by page, or
                 * the area was part of a huge bio and other parts of the
                 * bio caused I/O errors, or the block layer merged several
                 * read requests into one and the error is caused by a
                 * different bio (usually one of the two latter cases is
                 * the cause)
                 */
                spin_lock(&sdev->stat_lock);
                sdev->stat.unverified_errors++;
                spin_unlock(&sdev->stat_lock);

                goto out;
        }

        if (!sblock_bad->no_io_error_seen) {
                spin_lock(&sdev->stat_lock);
                sdev->stat.read_errors++;
                spin_unlock(&sdev->stat_lock);
                if (__ratelimit(&_rs))
                        scrub_print_warning("i/o error", sblock_to_check);
        } else if (sblock_bad->checksum_error) {
                spin_lock(&sdev->stat_lock);
                sdev->stat.csum_errors++;
                spin_unlock(&sdev->stat_lock);
                if (__ratelimit(&_rs))
                        scrub_print_warning("checksum error", sblock_to_check);
        } else if (sblock_bad->header_error) {
                spin_lock(&sdev->stat_lock);
                sdev->stat.verify_errors++;
                spin_unlock(&sdev->stat_lock);
                if (__ratelimit(&_rs))
                        scrub_print_warning("checksum/header error",
                                            sblock_to_check);
        }

        if (sdev->readonly)
                goto did_not_correct_error;

        if (!is_metadata && !have_csum) {
                struct scrub_fixup_nodatasum *fixup_nodatasum;

                /*
                 * !is_metadata and !have_csum, this means that the data
                 * might not be COW'ed, that it might be modified
                 * concurrently. The general strategy to work on the
                 * commit root does not help in the case when COW is not
                 * used.
                 */
                fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS);
                if (!fixup_nodatasum)
                        goto did_not_correct_error;
                fixup_nodatasum->sdev = sdev;
                fixup_nodatasum->logical = logical;
                fixup_nodatasum->root = fs_info->extent_root;
                fixup_nodatasum->mirror_num = failed_mirror_index + 1;
                /*
                 * increment scrubs_running to prevent cancel requests from
                 * completing as long as a fixup worker is running. we must
                 * also increment scrubs_paused to prevent deadlocking on
                 * pause requests used for transaction commits (as the worker
                 * uses a transaction context). it is safe to regard the fixup
                 * worker as paused for all practical matters. effectively, we
                 * only avoid cancellation requests from completing.
                 */
                mutex_lock(&fs_info->scrub_lock);
                atomic_inc(&fs_info->scrubs_running);
                atomic_inc(&fs_info->scrubs_paused);
                mutex_unlock(&fs_info->scrub_lock);
                atomic_inc(&sdev->fixup_cnt);
                fixup_nodatasum->work.func = scrub_fixup_nodatasum;
                btrfs_queue_worker(&fs_info->scrub_workers,
                                   &fixup_nodatasum->work);
                goto out;
        }

        /*
         * now build and submit the bios for the other mirrors, check
         * checksums
         */
        for (mirror_index = 0;
             mirror_index < BTRFS_MAX_MIRRORS &&
             sblocks_for_recheck[mirror_index].page_count > 0;
             mirror_index++) {
                if (mirror_index == failed_mirror_index)
                        continue;

                /* build and submit the bios, check checksums */
                ret = scrub_recheck_block(fs_info,
                                          sblocks_for_recheck + mirror_index,
                                          is_metadata, have_csum, csum,
                                          generation, sdev->csum_size);
                if (ret)
                        goto did_not_correct_error;
        }

        /*
         * first try to pick the mirror which is completely without I/O
         * errors and also does not have a checksum error.
         * If one is found, and if a checksum is present, the full block
         * that is known to contain an error is rewritten. Afterwards
         * the block is known to be corrected.
         * If a mirror is found which is completely correct, and no
         * checksum is present, only those pages are rewritten that had
         * an I/O error in the block to be repaired, since it cannot be
         * determined which copy of the other pages is better (and it
         * could happen otherwise that a correct page would be
         * overwritten by a bad one).
         */
        for (mirror_index = 0;
             mirror_index < BTRFS_MAX_MIRRORS &&
             sblocks_for_recheck[mirror_index].page_count > 0;
             mirror_index++) {
                struct scrub_block *sblock_other = sblocks_for_recheck +
                                                   mirror_index;

                if (!sblock_other->header_error &&
                    !sblock_other->checksum_error &&
                    sblock_other->no_io_error_seen) {
                        int force_write = is_metadata || have_csum;

                        ret = scrub_repair_block_from_good_copy(sblock_bad,
                                                                sblock_other,
                                                                force_write);
                        if (0 == ret)
                                goto corrected_error;
                }
        }

        /*
         * in case of I/O errors in the area that is supposed to be
         * repaired, continue by picking good copies of those pages.
         * Select the good pages from mirrors to rewrite bad pages from
         * the area to fix. Afterwards verify the checksum of the block
         * that is supposed to be repaired. This verification step is
         * only done for the purpose of statistics counting and for the
         * final scrub report on whether errors remain.
         * A perfect algorithm could make use of the checksum and try
         * all possible combinations of pages from the different mirrors
         * until the checksum verification succeeds. For example, when
         * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
         * of mirror #2 is readable but the final checksum test fails,
         * then the 2nd page of mirror #3 could be tried, whether now
         * the final checksum succeeds. But this would be a rare
         * exception and is therefore not implemented. At least it is
         * avoided that the good copy is overwritten.
         * A more useful improvement would be to pick the sectors
         * without I/O error based on sector sizes (512 bytes on legacy
         * disks) instead of on PAGE_SIZE. Then maybe 512 bytes of one
         * mirror could be repaired by taking 512 bytes of a different
         * mirror, even if other 512 byte sectors in the same PAGE_SIZE
         * area are unreadable.
         */

        /* can only fix I/O errors from here on */
        if (sblock_bad->no_io_error_seen)
                goto did_not_correct_error;

        success = 1;
        for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
                struct scrub_page *page_bad = sblock_bad->pagev + page_num;

                if (!page_bad->io_error)
                        continue;

                for (mirror_index = 0;
                     mirror_index < BTRFS_MAX_MIRRORS &&
                     sblocks_for_recheck[mirror_index].page_count > 0;
                     mirror_index++) {
                        struct scrub_block *sblock_other = sblocks_for_recheck +
                                                           mirror_index;
                        struct scrub_page *page_other = sblock_other->pagev +
                                                        page_num;

                        if (!page_other->io_error) {
                                ret = scrub_repair_page_from_good_copy(
                                        sblock_bad, sblock_other, page_num, 0);
                                if (0 == ret) {
                                        page_bad->io_error = 0;
                                        break; /* succeeded for this page */
                                }
                        }
                }

                if (page_bad->io_error) {
                        /* did not find a mirror to copy the page from */
                        success = 0;
                }
        }

        if (success) {
                if (is_metadata || have_csum) {
                        /*
                         * need to verify the checksum now that all
                         * sectors on disk are repaired (the write
                         * request for data to be repaired is on its way).
                         * Just be lazy and use scrub_recheck_block()
                         * which re-reads the data before the checksum
                         * is verified, but most likely the data comes out
                         * of the page cache.
                         */
                        ret = scrub_recheck_block(fs_info, sblock_bad,
                                                  is_metadata, have_csum, csum,
                                                  generation, sdev->csum_size);
                        if (!ret && !sblock_bad->header_error &&
                            !sblock_bad->checksum_error &&
                            sblock_bad->no_io_error_seen)
                                goto corrected_error;
                        else
                                goto did_not_correct_error;
                } else {
corrected_error:
                        spin_lock(&sdev->stat_lock);
                        sdev->stat.corrected_errors++;
                        spin_unlock(&sdev->stat_lock);
                        printk_ratelimited(KERN_ERR
                                "btrfs: fixed up error at logical %llu on dev %s\n",
                                (unsigned long long)logical, sdev->dev->name);
                }
        } else {
did_not_correct_error:
                spin_lock(&sdev->stat_lock);
                sdev->stat.uncorrectable_errors++;
                spin_unlock(&sdev->stat_lock);
                printk_ratelimited(KERN_ERR
                        "btrfs: unable to fixup (regular) error at logical %llu on dev %s\n",
                        (unsigned long long)logical, sdev->dev->name);
        }

out:
        if (sblocks_for_recheck) {
                for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
                     mirror_index++) {
                        struct scrub_block *sblock = sblocks_for_recheck +
                                                     mirror_index;
                        int page_index;

                        for (page_index = 0; page_index < SCRUB_PAGES_PER_BIO;
                             page_index++)
                                if (sblock->pagev[page_index].page)
                                        __free_page(
                                                sblock->pagev[page_index].page);
                }
                kfree(sblocks_for_recheck);
        }

        return 0;
}

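/*
 * Map the logical range of the errored block mirror by mirror: for every
 * PAGE_SIZE chunk, btrfs_map_block() returns one stripe per mirror, and a
 * page is allocated in the corresponding sblocks_for_recheck entry with
 * its physical address, bdev and mirror number filled in.
 */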
static int scrub_setup_recheck_block(struct scrub_dev *sdev,
                                     struct btrfs_mapping_tree *map_tree,
                                     u64 length, u64 logical,
                                     struct scrub_block *sblocks_for_recheck)
{
        int page_index;
        int mirror_index;
        int ret;

        /*
         * note: the three members sdev, ref_count and outstanding_pages
         * are not used (and not set) in the blocks that are used for
         * the recheck procedure
         */

        page_index = 0;
        while (length > 0) {
                u64 sublen = min_t(u64, length, PAGE_SIZE);
                u64 mapped_length = sublen;
                struct btrfs_bio *bbio = NULL;

                /*
                 * with a length of PAGE_SIZE, each returned stripe
                 * represents one mirror
                 */
                ret = btrfs_map_block(map_tree, WRITE, logical, &mapped_length,
                                      &bbio, 0);
                if (ret || !bbio || mapped_length < sublen) {
                        kfree(bbio);
                        return -EIO;
                }

                BUG_ON(page_index >= SCRUB_PAGES_PER_BIO);
                for (mirror_index = 0; mirror_index < (int)bbio->num_stripes;
                     mirror_index++) {
                        struct scrub_block *sblock;
                        struct scrub_page *page;

                        if (mirror_index >= BTRFS_MAX_MIRRORS)
                                continue;

                        sblock = sblocks_for_recheck + mirror_index;
                        page = sblock->pagev + page_index;
                        page->logical = logical;
                        page->physical = bbio->stripes[mirror_index].physical;
                        /* for missing devices, bdev is NULL */
                        page->bdev = bbio->stripes[mirror_index].dev->bdev;
                        page->mirror_num = mirror_index + 1;
                        page->page = alloc_page(GFP_NOFS);
                        if (!page->page) {
                                spin_lock(&sdev->stat_lock);
                                sdev->stat.malloc_errors++;
                                spin_unlock(&sdev->stat_lock);
                                return -ENOMEM;
                        }
                        sblock->page_count++;
                }
                kfree(bbio);
                length -= sublen;
                logical += sublen;
                page_index++;
        }

        return 0;
}

/*
 * this function will check the on disk data for checksum errors, header
 * errors and read I/O errors. If any I/O errors happen, the exact pages
 * which are errored are marked as being bad. The goal is to enable scrub
 * to take those pages that are not errored from all the mirrors so that
 * the pages that are errored in the just handled mirror can be repaired.
 */
static int scrub_recheck_block(struct btrfs_fs_info *fs_info,
                               struct scrub_block *sblock, int is_metadata,
                               int have_csum, u8 *csum, u64 generation,
                               u16 csum_size)
{
        int page_num;

        sblock->no_io_error_seen = 1;
        sblock->header_error = 0;
        sblock->checksum_error = 0;

        for (page_num = 0; page_num < sblock->page_count; page_num++) {
                struct bio *bio;
                int ret;
                struct scrub_page *page = sblock->pagev + page_num;
                DECLARE_COMPLETION_ONSTACK(complete);

                if (page->bdev == NULL) {
                        page->io_error = 1;
                        sblock->no_io_error_seen = 0;
                        continue;
                }

                BUG_ON(!page->page);
                bio = bio_alloc(GFP_NOFS, 1);
                if (!bio)
                        return -EIO;
                bio->bi_bdev = page->bdev;
                bio->bi_sector = page->physical >> 9;
                bio->bi_end_io = scrub_complete_bio_end_io;
                bio->bi_private = &complete;

                ret = bio_add_page(bio, page->page, PAGE_SIZE, 0);
                if (PAGE_SIZE != ret) {
                        bio_put(bio);
                        return -EIO;
                }
                btrfsic_submit_bio(READ, bio);

                /* this will also unplug the queue */
                wait_for_completion(&complete);

                page->io_error = !test_bit(BIO_UPTODATE, &bio->bi_flags);
                if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
                        sblock->no_io_error_seen = 0;
                bio_put(bio);
        }

        if (sblock->no_io_error_seen)
                scrub_recheck_block_checksum(fs_info, sblock, is_metadata,
                                             have_csum, csum, generation,
                                             csum_size);

        return 0;
}

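/*
 * Recompute the checksum over all pages of the block (mapping one page at a
 * time with kmap_atomic) and compare it against the expected checksum. For
 * metadata, the header's bytenr, generation, fsid and chunk tree uuid are
 * verified as well and the expected csum is taken from the header itself.
 */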
static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
                                         struct scrub_block *sblock,
                                         int is_metadata, int have_csum,
                                         const u8 *csum, u64 generation,
                                         u16 csum_size)
{
        int page_num;
        u8 calculated_csum[BTRFS_CSUM_SIZE];
        u32 crc = ~(u32)0;
        struct btrfs_root *root = fs_info->extent_root;
        void *mapped_buffer;

        BUG_ON(!sblock->pagev[0].page);
        if (is_metadata) {
                struct btrfs_header *h;

                mapped_buffer = kmap_atomic(sblock->pagev[0].page);
                h = (struct btrfs_header *)mapped_buffer;

                if (sblock->pagev[0].logical != le64_to_cpu(h->bytenr) ||
                    generation != le64_to_cpu(h->generation) ||
                    memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE) ||
                    memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
                           BTRFS_UUID_SIZE))
                        sblock->header_error = 1;
                csum = h->csum;
        } else {
                if (!have_csum)
                        return;

                mapped_buffer = kmap_atomic(sblock->pagev[0].page);
        }

        for (page_num = 0;;) {
                if (page_num == 0 && is_metadata)
                        crc = btrfs_csum_data(root,
                                ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE,
                                crc, PAGE_SIZE - BTRFS_CSUM_SIZE);
                else
                        crc = btrfs_csum_data(root, mapped_buffer, crc,
                                              PAGE_SIZE);

                kunmap_atomic(mapped_buffer);
                page_num++;
                if (page_num >= sblock->page_count)
                        break;
                BUG_ON(!sblock->pagev[page_num].page);

                mapped_buffer = kmap_atomic(sblock->pagev[page_num].page);
        }

        btrfs_csum_final(crc, calculated_csum);
        if (memcmp(calculated_csum, csum, csum_size))
                sblock->checksum_error = 1;
}

static void scrub_complete_bio_end_io(struct bio *bio, int err)
{
        complete((struct completion *)bio->bi_private);
}

|  | 1146 | static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad, | 
|  | 1147 | struct scrub_block *sblock_good, | 
|  | 1148 | int force_write) | 
|  | 1149 | { | 
|  | 1150 | int page_num; | 
|  | 1151 | int ret = 0; | 
|  | 1152 |  | 
|  | 1153 | for (page_num = 0; page_num < sblock_bad->page_count; page_num++) { | 
|  | 1154 | int ret_sub; | 
|  | 1155 |  | 
|  | 1156 | ret_sub = scrub_repair_page_from_good_copy(sblock_bad, | 
|  | 1157 | sblock_good, | 
|  | 1158 | page_num, | 
|  | 1159 | force_write); | 
|  | 1160 | if (ret_sub) | 
|  | 1161 | ret = ret_sub; | 
|  | 1162 | } | 
|  | 1163 |  | 
|  | 1164 | return ret; | 
|  | 1165 | } | 
|  | 1166 |  | 
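|  |  | /* | 
|  |  |  * write one page of the good mirror to the physical location of the | 
|  |  |  * bad mirror, but only if a write is forced or the bad block showed | 
|  |  |  * a header, checksum or I/O error. | 
|  |  |  */ | 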
|  | 1167 | static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad, | 
|  | 1168 | struct scrub_block *sblock_good, | 
|  | 1169 | int page_num, int force_write) | 
|  | 1170 | { | 
|  | 1171 | struct scrub_page *page_bad = sblock_bad->pagev + page_num; | 
|  | 1172 | struct scrub_page *page_good = sblock_good->pagev + page_num; | 
|  | 1173 |  | 
|  | 1174 | BUG_ON(sblock_bad->pagev[page_num].page == NULL); | 
|  | 1175 | BUG_ON(sblock_good->pagev[page_num].page == NULL); | 
|  | 1176 | if (force_write || sblock_bad->header_error || | 
|  | 1177 | sblock_bad->checksum_error || page_bad->io_error) { | 
|  | 1178 | struct bio *bio; | 
|  | 1179 | int ret; | 
|  | 1180 | DECLARE_COMPLETION_ONSTACK(complete); | 
|  | 1181 |  | 
|  | 1182 | bio = bio_alloc(GFP_NOFS, 1); | 
| Tsutomu Itoh | e627ee7 | 2012-04-12 16:03:56 -0400 | [diff] [blame] | 1183 | if (!bio) | 
|  | 1184 | return -EIO; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1185 | bio->bi_bdev = page_bad->bdev; | 
|  | 1186 | bio->bi_sector = page_bad->physical >> 9; | 
|  | 1187 | bio->bi_end_io = scrub_complete_bio_end_io; | 
|  | 1188 | bio->bi_private = &complete; | 
|  | 1189 |  | 
|  | 1190 | ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0); | 
|  | 1191 | if (PAGE_SIZE != ret) { | 
|  | 1192 | bio_put(bio); | 
|  | 1193 | return -EIO; | 
|  | 1194 | } | 
|  | 1195 | btrfsic_submit_bio(WRITE, bio); | 
|  | 1196 |  | 
|  | 1197 | /* this will also unplug the queue */ | 
|  | 1198 | wait_for_completion(&complete); | 
|  | 1199 | bio_put(bio); | 
|  | 1200 | } | 
|  | 1201 |  | 
|  | 1202 | return 0; | 
|  | 1203 | } | 
|  | 1204 |  | 
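|  |  | /* | 
|  |  |  * verify the checksum of a block according to the extent type stored | 
|  |  |  * in its first page and hand blocks that fail verification to | 
|  |  |  * scrub_handle_errored_block(). Super block errors are only counted, | 
|  |  |  * not repaired. | 
|  |  |  */ | 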
|  | 1205 | static void scrub_checksum(struct scrub_block *sblock) | 
|  | 1206 | { | 
|  | 1207 | u64 flags; | 
|  | 1208 | int ret; | 
|  | 1209 |  | 
|  | 1210 | BUG_ON(sblock->page_count < 1); | 
|  | 1211 | flags = sblock->pagev[0].flags; | 
|  | 1212 | ret = 0; | 
|  | 1213 | if (flags & BTRFS_EXTENT_FLAG_DATA) | 
|  | 1214 | ret = scrub_checksum_data(sblock); | 
|  | 1215 | else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) | 
|  | 1216 | ret = scrub_checksum_tree_block(sblock); | 
|  | 1217 | else if (flags & BTRFS_EXTENT_FLAG_SUPER) | 
|  | 1218 | (void)scrub_checksum_super(sblock); | 
|  | 1219 | else | 
|  | 1220 | WARN_ON(1); | 
|  | 1221 | if (ret) | 
|  | 1222 | scrub_handle_errored_block(sblock); | 
|  | 1223 | } | 
|  | 1224 |  | 
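|  |  | /* | 
|  |  |  * verify the checksum of a data block against the csum that was looked | 
|  |  |  * up when the pages were queued. Returns 1 on mismatch and 0 if the | 
|  |  |  * checksum matches or none is available. | 
|  |  |  */ | 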
|  | 1225 | static int scrub_checksum_data(struct scrub_block *sblock) | 
|  | 1226 | { | 
|  | 1227 | struct scrub_dev *sdev = sblock->sdev; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1228 | u8 csum[BTRFS_CSUM_SIZE]; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1229 | u8 *on_disk_csum; | 
|  | 1230 | struct page *page; | 
|  | 1231 | void *buffer; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1232 | u32 crc = ~(u32)0; | 
|  | 1233 | int fail = 0; | 
|  | 1234 | struct btrfs_root *root = sdev->dev->dev_root; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1235 | u64 len; | 
|  | 1236 | int index; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1237 |  | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1238 | BUG_ON(sblock->page_count < 1); | 
|  | 1239 | if (!sblock->pagev[0].have_csum) | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1240 | return 0; | 
|  | 1241 |  | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1242 | on_disk_csum = sblock->pagev[0].csum; | 
|  | 1243 | page = sblock->pagev[0].page; | 
| Linus Torvalds | 9613beb | 2012-03-30 12:44:29 -0700 | [diff] [blame] | 1244 | buffer = kmap_atomic(page); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1245 |  | 
|  | 1246 | len = sdev->sectorsize; | 
|  | 1247 | index = 0; | 
|  | 1248 | for (;;) { | 
|  | 1249 | u64 l = min_t(u64, len, PAGE_SIZE); | 
|  | 1250 |  | 
|  | 1251 | crc = btrfs_csum_data(root, buffer, crc, l); | 
| Linus Torvalds | 9613beb | 2012-03-30 12:44:29 -0700 | [diff] [blame] | 1252 | kunmap_atomic(buffer); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1253 | len -= l; | 
|  | 1254 | if (len == 0) | 
|  | 1255 | break; | 
|  | 1256 | index++; | 
|  | 1257 | BUG_ON(index >= sblock->page_count); | 
|  | 1258 | BUG_ON(!sblock->pagev[index].page); | 
|  | 1259 | page = sblock->pagev[index].page; | 
| Linus Torvalds | 9613beb | 2012-03-30 12:44:29 -0700 | [diff] [blame] | 1260 | buffer = kmap_atomic(page); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1261 | } | 
|  | 1262 |  | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1263 | btrfs_csum_final(crc, csum); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1264 | if (memcmp(csum, on_disk_csum, sdev->csum_size)) | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1265 | fail = 1; | 
|  | 1266 |  | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1267 | return fail; | 
|  | 1268 | } | 
|  | 1269 |  | 
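|  |  | /* | 
|  |  |  * verify a tree block: check bytenr, generation, fsid and chunk tree | 
|  |  |  * uuid in the header and recompute the checksum over the whole node. | 
|  |  |  * Returns non-zero if anything does not match. | 
|  |  |  */ | 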
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1270 | static int scrub_checksum_tree_block(struct scrub_block *sblock) | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1271 | { | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1272 | struct scrub_dev *sdev = sblock->sdev; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1273 | struct btrfs_header *h; | 
|  | 1274 | struct btrfs_root *root = sdev->dev->dev_root; | 
|  | 1275 | struct btrfs_fs_info *fs_info = root->fs_info; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1276 | u8 calculated_csum[BTRFS_CSUM_SIZE]; | 
|  | 1277 | u8 on_disk_csum[BTRFS_CSUM_SIZE]; | 
|  | 1278 | struct page *page; | 
|  | 1279 | void *mapped_buffer; | 
|  | 1280 | u64 mapped_size; | 
|  | 1281 | void *p; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1282 | u32 crc = ~(u32)0; | 
|  | 1283 | int fail = 0; | 
|  | 1284 | int crc_fail = 0; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1285 | u64 len; | 
|  | 1286 | int index; | 
|  | 1287 |  | 
|  | 1288 | BUG_ON(sblock->page_count < 1); | 
|  | 1289 | page = sblock->pagev[0].page; | 
| Linus Torvalds | 9613beb | 2012-03-30 12:44:29 -0700 | [diff] [blame] | 1290 | mapped_buffer = kmap_atomic(page); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1291 | h = (struct btrfs_header *)mapped_buffer; | 
|  | 1292 | memcpy(on_disk_csum, h->csum, sdev->csum_size); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1293 |  | 
|  | 1294 | /* | 
|  | 1295 | * we don't use the getter functions here, as we | 
|  | 1296 | * a) don't have an extent buffer and | 
|  | 1297 | * b) the page is already kmapped | 
|  | 1298 | */ | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1299 |  | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1300 | if (sblock->pagev[0].logical != le64_to_cpu(h->bytenr)) | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1301 | ++fail; | 
|  | 1302 |  | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1303 | if (sblock->pagev[0].generation != le64_to_cpu(h->generation)) | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1304 | ++fail; | 
|  | 1305 |  | 
|  | 1306 | if (memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE)) | 
|  | 1307 | ++fail; | 
|  | 1308 |  | 
|  | 1309 | if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid, | 
|  | 1310 | BTRFS_UUID_SIZE)) | 
|  | 1311 | ++fail; | 
|  | 1312 |  | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1313 | BUG_ON(sdev->nodesize != sdev->leafsize); | 
|  | 1314 | len = sdev->nodesize - BTRFS_CSUM_SIZE; | 
|  | 1315 | mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE; | 
|  | 1316 | p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE; | 
|  | 1317 | index = 0; | 
|  | 1318 | for (;;) { | 
|  | 1319 | u64 l = min_t(u64, len, mapped_size); | 
|  | 1320 |  | 
|  | 1321 | crc = btrfs_csum_data(root, p, crc, l); | 
| Linus Torvalds | 9613beb | 2012-03-30 12:44:29 -0700 | [diff] [blame] | 1322 | kunmap_atomic(mapped_buffer); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1323 | len -= l; | 
|  | 1324 | if (len == 0) | 
|  | 1325 | break; | 
|  | 1326 | index++; | 
|  | 1327 | BUG_ON(index >= sblock->page_count); | 
|  | 1328 | BUG_ON(!sblock->pagev[index].page); | 
|  | 1329 | page = sblock->pagev[index].page; | 
| Linus Torvalds | 9613beb | 2012-03-30 12:44:29 -0700 | [diff] [blame] | 1330 | mapped_buffer = kmap_atomic(page); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1331 | mapped_size = PAGE_SIZE; | 
|  | 1332 | p = mapped_buffer; | 
|  | 1333 | } | 
|  | 1334 |  | 
|  | 1335 | btrfs_csum_final(crc, calculated_csum); | 
|  | 1336 | if (memcmp(calculated_csum, on_disk_csum, sdev->csum_size)) | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1337 | ++crc_fail; | 
|  | 1338 |  | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1339 | return fail || crc_fail; | 
|  | 1340 | } | 
|  | 1341 |  | 
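|  |  | /* | 
|  |  |  * verify a super block copy: check bytenr, generation and fsid and | 
|  |  |  * recompute the checksum. Failures are accounted in stat.super_errors. | 
|  |  |  */ | 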
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1342 | static int scrub_checksum_super(struct scrub_block *sblock) | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1343 | { | 
|  | 1344 | struct btrfs_super_block *s; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1345 | struct scrub_dev *sdev = sblock->sdev; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1346 | struct btrfs_root *root = sdev->dev->dev_root; | 
|  | 1347 | struct btrfs_fs_info *fs_info = root->fs_info; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1348 | u8 calculated_csum[BTRFS_CSUM_SIZE]; | 
|  | 1349 | u8 on_disk_csum[BTRFS_CSUM_SIZE]; | 
|  | 1350 | struct page *page; | 
|  | 1351 | void *mapped_buffer; | 
|  | 1352 | u64 mapped_size; | 
|  | 1353 | void *p; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1354 | u32 crc = ~(u32)0; | 
|  | 1355 | int fail = 0; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1356 | u64 len; | 
|  | 1357 | int index; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1358 |  | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1359 | BUG_ON(sblock->page_count < 1); | 
|  | 1360 | page = sblock->pagev[0].page; | 
| Linus Torvalds | 9613beb | 2012-03-30 12:44:29 -0700 | [diff] [blame] | 1361 | mapped_buffer = kmap_atomic(page); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1362 | s = (struct btrfs_super_block *)mapped_buffer; | 
|  | 1363 | memcpy(on_disk_csum, s->csum, sdev->csum_size); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1364 |  | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1365 | if (sblock->pagev[0].logical != le64_to_cpu(s->bytenr)) | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1366 | ++fail; | 
|  | 1367 |  | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1368 | if (sblock->pagev[0].generation != le64_to_cpu(s->generation)) | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1369 | ++fail; | 
|  | 1370 |  | 
|  | 1371 | if (memcmp(s->fsid, fs_info->fsid, BTRFS_UUID_SIZE)) | 
|  | 1372 | ++fail; | 
|  | 1373 |  | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1374 | len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE; | 
|  | 1375 | mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE; | 
|  | 1376 | p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE; | 
|  | 1377 | index = 0; | 
|  | 1378 | for (;;) { | 
|  | 1379 | u64 l = min_t(u64, len, mapped_size); | 
|  | 1380 |  | 
|  | 1381 | crc = btrfs_csum_data(root, p, crc, l); | 
| Linus Torvalds | 9613beb | 2012-03-30 12:44:29 -0700 | [diff] [blame] | 1382 | kunmap_atomic(mapped_buffer); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1383 | len -= l; | 
|  | 1384 | if (len == 0) | 
|  | 1385 | break; | 
|  | 1386 | index++; | 
|  | 1387 | BUG_ON(index >= sblock->page_count); | 
|  | 1388 | BUG_ON(!sblock->pagev[index].page); | 
|  | 1389 | page = sblock->pagev[index].page; | 
| Linus Torvalds | 9613beb | 2012-03-30 12:44:29 -0700 | [diff] [blame] | 1390 | mapped_buffer = kmap_atomic(page); | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1391 | mapped_size = PAGE_SIZE; | 
|  | 1392 | p = mapped_buffer; | 
|  | 1393 | } | 
|  | 1394 |  | 
|  | 1395 | btrfs_csum_final(crc, calculated_csum); | 
|  | 1396 | if (memcmp(calculated_csum, on_disk_csum, sdev->csum_size)) | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1397 | ++fail; | 
|  | 1398 |  | 
|  | 1399 | if (fail) { | 
|  | 1400 | /* | 
|  | 1401 | * if we find an error in a super block, we just report it; super | 
|  | 1402 | * blocks get rewritten with the next transaction commit anyway | 
|  | 1404 | */ | 
|  | 1405 | spin_lock(&sdev->stat_lock); | 
|  | 1406 | ++sdev->stat.super_errors; | 
|  | 1407 | spin_unlock(&sdev->stat_lock); | 
|  | 1408 | } | 
|  | 1409 |  | 
|  | 1410 | return fail; | 
|  | 1411 | } | 
|  | 1412 |  | 
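|  |  | /* | 
|  |  |  * a scrub_block holds one reference for its creator plus one for each | 
|  |  |  * page that is queued in a bio; the last put frees the pages and the | 
|  |  |  * block itself. | 
|  |  |  */ | 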
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1413 | static void scrub_block_get(struct scrub_block *sblock) | 
|  | 1414 | { | 
|  | 1415 | atomic_inc(&sblock->ref_count); | 
|  | 1416 | } | 
|  | 1417 |  | 
|  | 1418 | static void scrub_block_put(struct scrub_block *sblock) | 
|  | 1419 | { | 
|  | 1420 | if (atomic_dec_and_test(&sblock->ref_count)) { | 
|  | 1421 | int i; | 
|  | 1422 |  | 
|  | 1423 | for (i = 0; i < sblock->page_count; i++) | 
|  | 1424 | if (sblock->pagev[i].page) | 
|  | 1425 | __free_page(sblock->pagev[i].page); | 
|  | 1426 | kfree(sblock); | 
|  | 1427 | } | 
|  | 1428 | } | 
|  | 1429 |  | 
| Stefan Behrens | 1623ede | 2012-03-27 14:21:26 -0400 | [diff] [blame] | 1430 | static void scrub_submit(struct scrub_dev *sdev) | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1431 | { | 
|  | 1432 | struct scrub_bio *sbio; | 
|  | 1433 |  | 
|  | 1434 | if (sdev->curr == -1) | 
| Stefan Behrens | 1623ede | 2012-03-27 14:21:26 -0400 | [diff] [blame] | 1435 | return; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1436 |  | 
|  | 1437 | sbio = sdev->bios[sdev->curr]; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1438 | sdev->curr = -1; | 
|  | 1439 | atomic_inc(&sdev->in_flight); | 
|  | 1440 |  | 
| Stefan Behrens | 21adbd5 | 2011-11-09 13:44:05 +0100 | [diff] [blame] | 1441 | btrfsic_submit_bio(READ, sbio->bio); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1442 | } | 
|  | 1443 |  | 
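|  |  | /* | 
|  |  |  * add one page to the bio currently being built for this device. A new | 
|  |  |  * bio is set up when none is in use; the bio is submitted early when | 
|  |  |  * the page is not physically and logically contiguous to it or when | 
|  |  |  * the bio is full. | 
|  |  |  */ | 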
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1444 | static int scrub_add_page_to_bio(struct scrub_dev *sdev, | 
|  | 1445 | struct scrub_page *spage) | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1446 | { | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1447 | struct scrub_block *sblock = spage->sblock; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1448 | struct scrub_bio *sbio; | 
| Arne Jansen | 69f4cb5 | 2011-11-11 08:17:10 -0500 | [diff] [blame] | 1449 | int ret; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1450 |  | 
|  | 1451 | again: | 
|  | 1452 | /* | 
|  | 1453 | * grab a fresh bio or wait for one to become available | 
|  | 1454 | */ | 
|  | 1455 | while (sdev->curr == -1) { | 
|  | 1456 | spin_lock(&sdev->list_lock); | 
|  | 1457 | sdev->curr = sdev->first_free; | 
|  | 1458 | if (sdev->curr != -1) { | 
|  | 1459 | sdev->first_free = sdev->bios[sdev->curr]->next_free; | 
|  | 1460 | sdev->bios[sdev->curr]->next_free = -1; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1461 | sdev->bios[sdev->curr]->page_count = 0; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1462 | spin_unlock(&sdev->list_lock); | 
|  | 1463 | } else { | 
|  | 1464 | spin_unlock(&sdev->list_lock); | 
|  | 1465 | wait_event(sdev->list_wait, sdev->first_free != -1); | 
|  | 1466 | } | 
|  | 1467 | } | 
|  | 1468 | sbio = sdev->bios[sdev->curr]; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1469 | if (sbio->page_count == 0) { | 
| Arne Jansen | 69f4cb5 | 2011-11-11 08:17:10 -0500 | [diff] [blame] | 1470 | struct bio *bio; | 
|  | 1471 |  | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1472 | sbio->physical = spage->physical; | 
|  | 1473 | sbio->logical = spage->logical; | 
|  | 1474 | bio = sbio->bio; | 
|  | 1475 | if (!bio) { | 
|  | 1476 | bio = bio_alloc(GFP_NOFS, sdev->pages_per_bio); | 
|  | 1477 | if (!bio) | 
|  | 1478 | return -ENOMEM; | 
|  | 1479 | sbio->bio = bio; | 
|  | 1480 | } | 
| Arne Jansen | 69f4cb5 | 2011-11-11 08:17:10 -0500 | [diff] [blame] | 1481 |  | 
|  | 1482 | bio->bi_private = sbio; | 
|  | 1483 | bio->bi_end_io = scrub_bio_end_io; | 
|  | 1484 | bio->bi_bdev = sdev->dev->bdev; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1485 | bio->bi_sector = spage->physical >> 9; | 
| Arne Jansen | 69f4cb5 | 2011-11-11 08:17:10 -0500 | [diff] [blame] | 1486 | sbio->err = 0; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1487 | } else if (sbio->physical + sbio->page_count * PAGE_SIZE != | 
|  | 1488 | spage->physical || | 
|  | 1489 | sbio->logical + sbio->page_count * PAGE_SIZE != | 
|  | 1490 | spage->logical) { | 
| Stefan Behrens | 1623ede | 2012-03-27 14:21:26 -0400 | [diff] [blame] | 1491 | scrub_submit(sdev); | 
| Arne Jansen | 69f4cb5 | 2011-11-11 08:17:10 -0500 | [diff] [blame] | 1492 | goto again; | 
|  | 1493 | } | 
|  | 1494 |  | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1495 | sbio->pagev[sbio->page_count] = spage; | 
|  | 1496 | ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0); | 
|  | 1497 | if (ret != PAGE_SIZE) { | 
|  | 1498 | if (sbio->page_count < 1) { | 
|  | 1499 | bio_put(sbio->bio); | 
|  | 1500 | sbio->bio = NULL; | 
|  | 1501 | return -EIO; | 
|  | 1502 | } | 
|  | 1503 | scrub_submit(sdev); | 
|  | 1504 | goto again; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1505 | } | 
| Arne Jansen | 1bc8779 | 2011-05-28 21:57:55 +0200 | [diff] [blame] | 1506 |  | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1507 | scrub_block_get(sblock); /* one for the added page */ | 
|  | 1508 | atomic_inc(&sblock->outstanding_pages); | 
|  | 1509 | sbio->page_count++; | 
|  | 1510 | if (sbio->page_count == sdev->pages_per_bio) | 
| Stefan Behrens | 1623ede | 2012-03-27 14:21:26 -0400 | [diff] [blame] | 1511 | scrub_submit(sdev); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1512 |  | 
|  | 1513 | return 0; | 
|  | 1514 | } | 
|  | 1515 |  | 
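|  |  | /* | 
|  |  |  * create a scrub_block for the range [logical, logical + len), allocate | 
|  |  |  * one page per PAGE_SIZE of it, record the checksum if one is known and | 
|  |  |  * queue all pages for reading. | 
|  |  |  */ | 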
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1516 | static int scrub_pages(struct scrub_dev *sdev, u64 logical, u64 len, | 
|  | 1517 | u64 physical, u64 flags, u64 gen, int mirror_num, | 
|  | 1518 | u8 *csum, int force) | 
|  | 1519 | { | 
|  | 1520 | struct scrub_block *sblock; | 
|  | 1521 | int index; | 
|  | 1522 |  | 
|  | 1523 | sblock = kzalloc(sizeof(*sblock), GFP_NOFS); | 
|  | 1524 | if (!sblock) { | 
|  | 1525 | spin_lock(&sdev->stat_lock); | 
|  | 1526 | sdev->stat.malloc_errors++; | 
|  | 1527 | spin_unlock(&sdev->stat_lock); | 
|  | 1528 | return -ENOMEM; | 
|  | 1529 | } | 
|  | 1530 |  | 
|  | 1531 | /* one ref inside this function, plus one for each page later on */ | 
|  | 1532 | atomic_set(&sblock->ref_count, 1); | 
|  | 1533 | sblock->sdev = sdev; | 
|  | 1534 | sblock->no_io_error_seen = 1; | 
|  | 1535 |  | 
|  | 1536 | for (index = 0; len > 0; index++) { | 
|  | 1537 | struct scrub_page *spage = sblock->pagev + index; | 
|  | 1538 | u64 l = min_t(u64, len, PAGE_SIZE); | 
|  | 1539 |  | 
|  | 1540 | BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK); | 
|  | 1541 | spage->page = alloc_page(GFP_NOFS); | 
|  | 1542 | if (!spage->page) { | 
|  | 1543 | spin_lock(&sdev->stat_lock); | 
|  | 1544 | sdev->stat.malloc_errors++; | 
|  | 1545 | spin_unlock(&sdev->stat_lock); | 
|  | 1546 | while (index > 0) { | 
|  | 1547 | index--; | 
|  | 1548 | __free_page(sblock->pagev[index].page); | 
|  | 1549 | } | 
|  | 1550 | kfree(sblock); | 
|  | 1551 | return -ENOMEM; | 
|  | 1552 | } | 
|  | 1553 | spage->sblock = sblock; | 
|  | 1554 | spage->bdev = sdev->dev->bdev; | 
|  | 1555 | spage->flags = flags; | 
|  | 1556 | spage->generation = gen; | 
|  | 1557 | spage->logical = logical; | 
|  | 1558 | spage->physical = physical; | 
|  | 1559 | spage->mirror_num = mirror_num; | 
|  | 1560 | if (csum) { | 
|  | 1561 | spage->have_csum = 1; | 
|  | 1562 | memcpy(spage->csum, csum, sdev->csum_size); | 
|  | 1563 | } else { | 
|  | 1564 | spage->have_csum = 0; | 
|  | 1565 | } | 
|  | 1566 | sblock->page_count++; | 
|  | 1567 | len -= l; | 
|  | 1568 | logical += l; | 
|  | 1569 | physical += l; | 
|  | 1570 | } | 
|  | 1571 |  | 
|  | 1572 | BUG_ON(sblock->page_count == 0); | 
|  | 1573 | for (index = 0; index < sblock->page_count; index++) { | 
|  | 1574 | struct scrub_page *spage = sblock->pagev + index; | 
|  | 1575 | int ret; | 
|  | 1576 |  | 
|  | 1577 | ret = scrub_add_page_to_bio(sdev, spage); | 
|  | 1578 | if (ret) { | 
|  | 1579 | scrub_block_put(sblock); | 
|  | 1580 | return ret; | 
|  | 1581 | } | 
|  | 1582 | } | 
|  | 1583 |  | 
|  | 1584 | if (force) | 
|  | 1585 | scrub_submit(sdev); | 
|  | 1586 |  | 
|  | 1587 | /* the last reference frees, either here or in the bio completion of the last page */ | 
|  | 1588 | scrub_block_put(sblock); | 
|  | 1589 | return 0; | 
|  | 1590 | } | 
|  | 1591 |  | 
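|  |  | /* | 
|  |  |  * bio completion callback: record the status and defer all further | 
|  |  |  * processing to the scrub worker thread. | 
|  |  |  */ | 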
|  | 1592 | static void scrub_bio_end_io(struct bio *bio, int err) | 
|  | 1593 | { | 
|  | 1594 | struct scrub_bio *sbio = bio->bi_private; | 
|  | 1595 | struct scrub_dev *sdev = sbio->sdev; | 
|  | 1596 | struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info; | 
|  | 1597 |  | 
|  | 1598 | sbio->err = err; | 
|  | 1599 | sbio->bio = bio; | 
|  | 1600 |  | 
|  | 1601 | btrfs_queue_worker(&fs_info->scrub_workers, &sbio->work); | 
|  | 1602 | } | 
|  | 1603 |  | 
|  | 1604 | static void scrub_bio_end_io_worker(struct btrfs_work *work) | 
|  | 1605 | { | 
|  | 1606 | struct scrub_bio *sbio = container_of(work, struct scrub_bio, work); | 
|  | 1607 | struct scrub_dev *sdev = sbio->sdev; | 
|  | 1608 | int i; | 
|  | 1609 |  | 
|  | 1610 | BUG_ON(sbio->page_count > SCRUB_PAGES_PER_BIO); | 
|  | 1611 | if (sbio->err) { | 
|  | 1612 | for (i = 0; i < sbio->page_count; i++) { | 
|  | 1613 | struct scrub_page *spage = sbio->pagev[i]; | 
|  | 1614 |  | 
|  | 1615 | spage->io_error = 1; | 
|  | 1616 | spage->sblock->no_io_error_seen = 0; | 
|  | 1617 | } | 
|  | 1618 | } | 
|  | 1619 |  | 
|  | 1620 | /* now complete the scrub_block items that have all pages completed */ | 
|  | 1621 | for (i = 0; i < sbio->page_count; i++) { | 
|  | 1622 | struct scrub_page *spage = sbio->pagev[i]; | 
|  | 1623 | struct scrub_block *sblock = spage->sblock; | 
|  | 1624 |  | 
|  | 1625 | if (atomic_dec_and_test(&sblock->outstanding_pages)) | 
|  | 1626 | scrub_block_complete(sblock); | 
|  | 1627 | scrub_block_put(sblock); | 
|  | 1628 | } | 
|  | 1629 |  | 
|  | 1630 | if (sbio->err) { | 
|  | 1631 | /* what is this good for??? */ | 
|  | 1632 | sbio->bio->bi_flags &= ~(BIO_POOL_MASK - 1); | 
|  | 1633 | sbio->bio->bi_flags |= 1 << BIO_UPTODATE; | 
|  | 1634 | sbio->bio->bi_phys_segments = 0; | 
|  | 1635 | sbio->bio->bi_idx = 0; | 
|  | 1636 |  | 
|  | 1637 | for (i = 0; i < sbio->page_count; i++) { | 
|  | 1638 | struct bio_vec *bi; | 
|  | 1639 | bi = &sbio->bio->bi_io_vec[i]; | 
|  | 1640 | bi->bv_offset = 0; | 
|  | 1641 | bi->bv_len = PAGE_SIZE; | 
|  | 1642 | } | 
|  | 1643 | } | 
|  | 1644 |  | 
|  | 1645 | bio_put(sbio->bio); | 
|  | 1646 | sbio->bio = NULL; | 
|  | 1647 | spin_lock(&sdev->list_lock); | 
|  | 1648 | sbio->next_free = sdev->first_free; | 
|  | 1649 | sdev->first_free = sbio->index; | 
|  | 1650 | spin_unlock(&sdev->list_lock); | 
|  | 1651 | atomic_dec(&sdev->in_flight); | 
|  | 1652 | wake_up(&sdev->list_wait); | 
|  | 1653 | } | 
|  | 1654 |  | 
|  | 1655 | static void scrub_block_complete(struct scrub_block *sblock) | 
|  | 1656 | { | 
|  | 1657 | if (!sblock->no_io_error_seen) | 
|  | 1658 | scrub_handle_errored_block(sblock); | 
|  | 1659 | else | 
|  | 1660 | scrub_checksum(sblock); | 
|  | 1661 | } | 
|  | 1662 |  | 
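|  |  | /* | 
|  |  |  * look up the data checksum for @logical in the list collected by | 
|  |  |  * btrfs_lookup_csums_range(). Entries that lie entirely before @logical | 
|  |  |  * are discarded. Returns 1 and copies the checksum to @csum if one is | 
|  |  |  * found, 0 otherwise. | 
|  |  |  */ | 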
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1663 | static int scrub_find_csum(struct scrub_dev *sdev, u64 logical, u64 len, | 
|  | 1664 | u8 *csum) | 
|  | 1665 | { | 
|  | 1666 | struct btrfs_ordered_sum *sum = NULL; | 
|  | 1667 | int ret = 0; | 
|  | 1668 | unsigned long i; | 
|  | 1669 | unsigned long num_sectors; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1670 |  | 
|  | 1671 | while (!list_empty(&sdev->csum_list)) { | 
|  | 1672 | sum = list_first_entry(&sdev->csum_list, | 
|  | 1673 | struct btrfs_ordered_sum, list); | 
|  | 1674 | if (sum->bytenr > logical) | 
|  | 1675 | return 0; | 
|  | 1676 | if (sum->bytenr + sum->len > logical) | 
|  | 1677 | break; | 
|  | 1678 |  | 
|  | 1679 | ++sdev->stat.csum_discards; | 
|  | 1680 | list_del(&sum->list); | 
|  | 1681 | kfree(sum); | 
|  | 1682 | sum = NULL; | 
|  | 1683 | } | 
|  | 1684 | if (!sum) | 
|  | 1685 | return 0; | 
|  | 1686 |  | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1687 | num_sectors = sum->len / sdev->sectorsize; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1688 | for (i = 0; i < num_sectors; ++i) { | 
|  | 1689 | if (sum->sums[i].bytenr == logical) { | 
|  | 1690 | memcpy(csum, &sum->sums[i].sum, sdev->csum_size); | 
|  | 1691 | ret = 1; | 
|  | 1692 | break; | 
|  | 1693 | } | 
|  | 1694 | } | 
|  | 1695 | if (ret && i == num_sectors - 1) { | 
|  | 1696 | list_del(&sum->list); | 
|  | 1697 | kfree(sum); | 
|  | 1698 | } | 
|  | 1699 | return ret; | 
|  | 1700 | } | 
|  | 1701 |  | 
|  | 1702 | /* scrub_extent() tries to collect up to 64 kB for each bio */ | 
|  | 1703 | static int scrub_extent(struct scrub_dev *sdev, u64 logical, u64 len, | 
| Jan Schmidt | e12fa9c | 2011-06-17 15:55:21 +0200 | [diff] [blame] | 1704 | u64 physical, u64 flags, u64 gen, int mirror_num) | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1705 | { | 
|  | 1706 | int ret; | 
|  | 1707 | u8 csum[BTRFS_CSUM_SIZE]; | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1708 | u32 blocksize; | 
|  | 1709 |  | 
|  | 1710 | if (flags & BTRFS_EXTENT_FLAG_DATA) { | 
|  | 1711 | blocksize = sdev->sectorsize; | 
|  | 1712 | spin_lock(&sdev->stat_lock); | 
|  | 1713 | sdev->stat.data_extents_scrubbed++; | 
|  | 1714 | sdev->stat.data_bytes_scrubbed += len; | 
|  | 1715 | spin_unlock(&sdev->stat_lock); | 
|  | 1716 | } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { | 
|  | 1717 | BUG_ON(sdev->nodesize != sdev->leafsize); | 
|  | 1718 | blocksize = sdev->nodesize; | 
|  | 1719 | spin_lock(&sdev->stat_lock); | 
|  | 1720 | sdev->stat.tree_extents_scrubbed++; | 
|  | 1721 | sdev->stat.tree_bytes_scrubbed += len; | 
|  | 1722 | spin_unlock(&sdev->stat_lock); | 
|  | 1723 | } else { | 
|  | 1724 | blocksize = sdev->sectorsize; | 
|  | 1725 | BUG_ON(1); | 
|  | 1726 | } | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1727 |  | 
|  | 1728 | while (len) { | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1729 | u64 l = min_t(u64, len, blocksize); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1730 | int have_csum = 0; | 
|  | 1731 |  | 
|  | 1732 | if (flags & BTRFS_EXTENT_FLAG_DATA) { | 
|  | 1733 | /* push csums to sbio */ | 
|  | 1734 | have_csum = scrub_find_csum(sdev, logical, l, csum); | 
|  | 1735 | if (have_csum == 0) | 
|  | 1736 | ++sdev->stat.no_csum; | 
|  | 1737 | } | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1738 | ret = scrub_pages(sdev, logical, l, physical, flags, gen, | 
|  | 1739 | mirror_num, have_csum ? csum : NULL, 0); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1740 | if (ret) | 
|  | 1741 | return ret; | 
|  | 1742 | len -= l; | 
|  | 1743 | logical += l; | 
|  | 1744 | physical += l; | 
|  | 1745 | } | 
|  | 1746 | return 0; | 
|  | 1747 | } | 
|  | 1748 |  | 
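|  |  | /* | 
|  |  |  * scrub one stripe of a chunk on this device: determine the stripe | 
|  |  |  * geometry and mirror number, read ahead the extent and csum trees, | 
|  |  |  * then walk the extent items that fall into each stripe and scrub them. | 
|  |  |  */ | 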
|  | 1749 | static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev, | 
|  | 1750 | struct map_lookup *map, int num, u64 base, u64 length) | 
|  | 1751 | { | 
|  | 1752 | struct btrfs_path *path; | 
|  | 1753 | struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info; | 
|  | 1754 | struct btrfs_root *root = fs_info->extent_root; | 
|  | 1755 | struct btrfs_root *csum_root = fs_info->csum_root; | 
|  | 1756 | struct btrfs_extent_item *extent; | 
| Arne Jansen | e7786c3 | 2011-05-28 20:58:38 +0000 | [diff] [blame] | 1757 | struct blk_plug plug; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1758 | u64 flags; | 
|  | 1759 | int ret; | 
|  | 1760 | int slot; | 
|  | 1761 | int i; | 
|  | 1762 | u64 nstripes; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1763 | struct extent_buffer *l; | 
|  | 1764 | struct btrfs_key key; | 
|  | 1765 | u64 physical; | 
|  | 1766 | u64 logical; | 
|  | 1767 | u64 generation; | 
| Jan Schmidt | e12fa9c | 2011-06-17 15:55:21 +0200 | [diff] [blame] | 1768 | int mirror_num; | 
| Arne Jansen | 7a26285 | 2011-06-10 12:39:23 +0200 | [diff] [blame] | 1769 | struct reada_control *reada1; | 
|  | 1770 | struct reada_control *reada2; | 
|  | 1771 | struct btrfs_key key_start; | 
|  | 1772 | struct btrfs_key key_end; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1773 |  | 
|  | 1774 | u64 increment = map->stripe_len; | 
|  | 1775 | u64 offset; | 
|  | 1776 |  | 
|  | 1777 | nstripes = length; | 
|  | 1778 | offset = 0; | 
|  | 1779 | do_div(nstripes, map->stripe_len); | 
|  | 1780 | if (map->type & BTRFS_BLOCK_GROUP_RAID0) { | 
|  | 1781 | offset = map->stripe_len * num; | 
|  | 1782 | increment = map->stripe_len * map->num_stripes; | 
| Jan Schmidt | 193ea74 | 2011-06-13 19:56:54 +0200 | [diff] [blame] | 1783 | mirror_num = 1; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1784 | } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) { | 
|  | 1785 | int factor = map->num_stripes / map->sub_stripes; | 
|  | 1786 | offset = map->stripe_len * (num / map->sub_stripes); | 
|  | 1787 | increment = map->stripe_len * factor; | 
| Jan Schmidt | 193ea74 | 2011-06-13 19:56:54 +0200 | [diff] [blame] | 1788 | mirror_num = num % map->sub_stripes + 1; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1789 | } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) { | 
|  | 1790 | increment = map->stripe_len; | 
| Jan Schmidt | 193ea74 | 2011-06-13 19:56:54 +0200 | [diff] [blame] | 1791 | mirror_num = num % map->num_stripes + 1; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1792 | } else if (map->type & BTRFS_BLOCK_GROUP_DUP) { | 
|  | 1793 | increment = map->stripe_len; | 
| Jan Schmidt | 193ea74 | 2011-06-13 19:56:54 +0200 | [diff] [blame] | 1794 | mirror_num = num % map->num_stripes + 1; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1795 | } else { | 
|  | 1796 | increment = map->stripe_len; | 
| Jan Schmidt | 193ea74 | 2011-06-13 19:56:54 +0200 | [diff] [blame] | 1797 | mirror_num = 1; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1798 | } | 
|  | 1799 |  | 
|  | 1800 | path = btrfs_alloc_path(); | 
|  | 1801 | if (!path) | 
|  | 1802 | return -ENOMEM; | 
|  | 1803 |  | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1804 | /* | 
|  | 1805 | * work on the commit root. The related disk blocks are static as | 
|  | 1806 | * long as COW is applied. This means it is safe to rewrite them | 
|  | 1807 | * to repair disk errors without any race conditions. | 
|  | 1808 | */ | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1809 | path->search_commit_root = 1; | 
|  | 1810 | path->skip_locking = 1; | 
|  | 1811 |  | 
|  | 1812 | /* | 
| Arne Jansen | 7a26285 | 2011-06-10 12:39:23 +0200 | [diff] [blame] | 1813 | * trigger the readahead for the extent tree and the csum tree and | 
|  | 1814 | * wait for completion. During readahead, the scrub is officially | 
|  | 1815 | * paused so as not to hold off transaction commits. | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1816 | */ | 
|  | 1817 | logical = base + offset; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1818 |  | 
| Arne Jansen | 7a26285 | 2011-06-10 12:39:23 +0200 | [diff] [blame] | 1819 | wait_event(sdev->list_wait, | 
|  | 1820 | atomic_read(&sdev->in_flight) == 0); | 
|  | 1821 | atomic_inc(&fs_info->scrubs_paused); | 
|  | 1822 | wake_up(&fs_info->scrub_pause_wait); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1823 |  | 
| Arne Jansen | 7a26285 | 2011-06-10 12:39:23 +0200 | [diff] [blame] | 1824 | /* FIXME it might be better to start readahead at commit root */ | 
|  | 1825 | key_start.objectid = logical; | 
|  | 1826 | key_start.type = BTRFS_EXTENT_ITEM_KEY; | 
|  | 1827 | key_start.offset = (u64)0; | 
|  | 1828 | key_end.objectid = base + offset + nstripes * increment; | 
|  | 1829 | key_end.type = BTRFS_EXTENT_ITEM_KEY; | 
|  | 1830 | key_end.offset = (u64)0; | 
|  | 1831 | reada1 = btrfs_reada_add(root, &key_start, &key_end); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1832 |  | 
| Arne Jansen | 7a26285 | 2011-06-10 12:39:23 +0200 | [diff] [blame] | 1833 | key_start.objectid = BTRFS_EXTENT_CSUM_OBJECTID; | 
|  | 1834 | key_start.type = BTRFS_EXTENT_CSUM_KEY; | 
|  | 1835 | key_start.offset = logical; | 
|  | 1836 | key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID; | 
|  | 1837 | key_end.type = BTRFS_EXTENT_CSUM_KEY; | 
|  | 1838 | key_end.offset = base + offset + nstripes * increment; | 
|  | 1839 | reada2 = btrfs_reada_add(csum_root, &key_start, &key_end); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1840 |  | 
| Arne Jansen | 7a26285 | 2011-06-10 12:39:23 +0200 | [diff] [blame] | 1841 | if (!IS_ERR(reada1)) | 
|  | 1842 | btrfs_reada_wait(reada1); | 
|  | 1843 | if (!IS_ERR(reada2)) | 
|  | 1844 | btrfs_reada_wait(reada2); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1845 |  | 
| Arne Jansen | 7a26285 | 2011-06-10 12:39:23 +0200 | [diff] [blame] | 1846 | mutex_lock(&fs_info->scrub_lock); | 
|  | 1847 | while (atomic_read(&fs_info->scrub_pause_req)) { | 
|  | 1848 | mutex_unlock(&fs_info->scrub_lock); | 
|  | 1849 | wait_event(fs_info->scrub_pause_wait, | 
|  | 1850 | atomic_read(&fs_info->scrub_pause_req) == 0); | 
|  | 1851 | mutex_lock(&fs_info->scrub_lock); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1852 | } | 
| Arne Jansen | 7a26285 | 2011-06-10 12:39:23 +0200 | [diff] [blame] | 1853 | atomic_dec(&fs_info->scrubs_paused); | 
|  | 1854 | mutex_unlock(&fs_info->scrub_lock); | 
|  | 1855 | wake_up(&fs_info->scrub_pause_wait); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1856 |  | 
|  | 1857 | /* | 
|  | 1858 | * collect all data csums for the stripe to avoid seeking during | 
|  | 1859 | * the scrub. This might currently (crc32) end up being about 1MB | 
|  | 1860 | */ | 
| Arne Jansen | e7786c3 | 2011-05-28 20:58:38 +0000 | [diff] [blame] | 1861 | blk_start_plug(&plug); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1862 |  | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1863 | /* | 
|  | 1864 | * now find all extents for each stripe and scrub them | 
|  | 1865 | */ | 
| Arne Jansen | 7a26285 | 2011-06-10 12:39:23 +0200 | [diff] [blame] | 1866 | logical = base + offset; | 
|  | 1867 | physical = map->stripes[num].physical; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1868 | ret = 0; | 
| Arne Jansen | 7a26285 | 2011-06-10 12:39:23 +0200 | [diff] [blame] | 1869 | for (i = 0; i < nstripes; ++i) { | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1870 | /* | 
|  | 1871 | * canceled? | 
|  | 1872 | */ | 
|  | 1873 | if (atomic_read(&fs_info->scrub_cancel_req) || | 
|  | 1874 | atomic_read(&sdev->cancel_req)) { | 
|  | 1875 | ret = -ECANCELED; | 
|  | 1876 | goto out; | 
|  | 1877 | } | 
|  | 1878 | /* | 
|  | 1879 | * check to see if we have to pause | 
|  | 1880 | */ | 
|  | 1881 | if (atomic_read(&fs_info->scrub_pause_req)) { | 
|  | 1882 | /* push queued extents */ | 
|  | 1883 | scrub_submit(sdev); | 
|  | 1884 | wait_event(sdev->list_wait, | 
|  | 1885 | atomic_read(&sdev->in_flight) == 0); | 
|  | 1886 | atomic_inc(&fs_info->scrubs_paused); | 
|  | 1887 | wake_up(&fs_info->scrub_pause_wait); | 
|  | 1888 | mutex_lock(&fs_info->scrub_lock); | 
|  | 1889 | while (atomic_read(&fs_info->scrub_pause_req)) { | 
|  | 1890 | mutex_unlock(&fs_info->scrub_lock); | 
|  | 1891 | wait_event(fs_info->scrub_pause_wait, | 
|  | 1892 | atomic_read(&fs_info->scrub_pause_req) == 0); | 
|  | 1893 | mutex_lock(&fs_info->scrub_lock); | 
|  | 1894 | } | 
|  | 1895 | atomic_dec(&fs_info->scrubs_paused); | 
|  | 1896 | mutex_unlock(&fs_info->scrub_lock); | 
|  | 1897 | wake_up(&fs_info->scrub_pause_wait); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1898 | } | 
|  | 1899 |  | 
| Arne Jansen | 7a26285 | 2011-06-10 12:39:23 +0200 | [diff] [blame] | 1900 | ret = btrfs_lookup_csums_range(csum_root, logical, | 
|  | 1901 | logical + map->stripe_len - 1, | 
|  | 1902 | &sdev->csum_list, 1); | 
|  | 1903 | if (ret) | 
|  | 1904 | goto out; | 
|  | 1905 |  | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1906 | key.objectid = logical; | 
|  | 1907 | key.type = BTRFS_EXTENT_ITEM_KEY; | 
|  | 1908 | key.offset = (u64)0; | 
|  | 1909 |  | 
|  | 1910 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | 
|  | 1911 | if (ret < 0) | 
|  | 1912 | goto out; | 
| Arne Jansen | 8c51032 | 2011-06-03 10:09:26 +0200 | [diff] [blame] | 1913 | if (ret > 0) { | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1914 | ret = btrfs_previous_item(root, path, 0, | 
|  | 1915 | BTRFS_EXTENT_ITEM_KEY); | 
|  | 1916 | if (ret < 0) | 
|  | 1917 | goto out; | 
| Arne Jansen | 8c51032 | 2011-06-03 10:09:26 +0200 | [diff] [blame] | 1918 | if (ret > 0) { | 
|  | 1919 | /* there's no smaller item, so stick with the | 
|  | 1920 | * larger one */ | 
|  | 1921 | btrfs_release_path(path); | 
|  | 1922 | ret = btrfs_search_slot(NULL, root, &key, | 
|  | 1923 | path, 0, 0); | 
|  | 1924 | if (ret < 0) | 
|  | 1925 | goto out; | 
|  | 1926 | } | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1927 | } | 
|  | 1928 |  | 
|  | 1929 | while (1) { | 
|  | 1930 | l = path->nodes[0]; | 
|  | 1931 | slot = path->slots[0]; | 
|  | 1932 | if (slot >= btrfs_header_nritems(l)) { | 
|  | 1933 | ret = btrfs_next_leaf(root, path); | 
|  | 1934 | if (ret == 0) | 
|  | 1935 | continue; | 
|  | 1936 | if (ret < 0) | 
|  | 1937 | goto out; | 
|  | 1938 |  | 
|  | 1939 | break; | 
|  | 1940 | } | 
|  | 1941 | btrfs_item_key_to_cpu(l, &key, slot); | 
|  | 1942 |  | 
|  | 1943 | if (key.objectid + key.offset <= logical) | 
|  | 1944 | goto next; | 
|  | 1945 |  | 
|  | 1946 | if (key.objectid >= logical + map->stripe_len) | 
|  | 1947 | break; | 
|  | 1948 |  | 
|  | 1949 | if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY) | 
|  | 1950 | goto next; | 
|  | 1951 |  | 
|  | 1952 | extent = btrfs_item_ptr(l, slot, | 
|  | 1953 | struct btrfs_extent_item); | 
|  | 1954 | flags = btrfs_extent_flags(l, extent); | 
|  | 1955 | generation = btrfs_extent_generation(l, extent); | 
|  | 1956 |  | 
|  | 1957 | if (key.objectid < logical && | 
|  | 1958 | (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) { | 
|  | 1959 | printk(KERN_ERR | 
|  | 1960 | "btrfs scrub: tree block %llu spanning " | 
|  | 1961 | "stripes, ignored. logical=%llu\n", | 
|  | 1962 | (unsigned long long)key.objectid, | 
|  | 1963 | (unsigned long long)logical); | 
|  | 1964 | goto next; | 
|  | 1965 | } | 
|  | 1966 |  | 
|  | 1967 | /* | 
|  | 1968 | * trim extent to this stripe | 
|  | 1969 | */ | 
|  | 1970 | if (key.objectid < logical) { | 
|  | 1971 | key.offset -= logical - key.objectid; | 
|  | 1972 | key.objectid = logical; | 
|  | 1973 | } | 
|  | 1974 | if (key.objectid + key.offset > | 
|  | 1975 | logical + map->stripe_len) { | 
|  | 1976 | key.offset = logical + map->stripe_len - | 
|  | 1977 | key.objectid; | 
|  | 1978 | } | 
|  | 1979 |  | 
|  | 1980 | ret = scrub_extent(sdev, key.objectid, key.offset, | 
|  | 1981 | key.objectid - logical + physical, | 
|  | 1982 | flags, generation, mirror_num); | 
|  | 1983 | if (ret) | 
|  | 1984 | goto out; | 
|  | 1985 |  | 
|  | 1986 | next: | 
|  | 1987 | path->slots[0]++; | 
|  | 1988 | } | 
| Chris Mason | 7126733 | 2011-05-23 06:30:52 -0400 | [diff] [blame] | 1989 | btrfs_release_path(path); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1990 | logical += increment; | 
|  | 1991 | physical += map->stripe_len; | 
|  | 1992 | spin_lock(&sdev->stat_lock); | 
|  | 1993 | sdev->stat.last_physical = physical; | 
|  | 1994 | spin_unlock(&sdev->stat_lock); | 
|  | 1995 | } | 
|  | 1996 | /* push queued extents */ | 
|  | 1997 | scrub_submit(sdev); | 
|  | 1998 |  | 
|  | 1999 | out: | 
| Arne Jansen | e7786c3 | 2011-05-28 20:58:38 +0000 | [diff] [blame] | 2000 | blk_finish_plug(&plug); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2001 | btrfs_free_path(path); | 
|  | 2002 | return ret < 0 ? ret : 0; | 
|  | 2003 | } | 
|  | 2004 |  | 
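|  |  | /* | 
|  |  |  * look up the chunk mapping for @chunk_offset and scrub the stripes of | 
|  |  |  * that chunk which are located on this device at @dev_offset. | 
|  |  |  */ | 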
|  | 2005 | static noinline_for_stack int scrub_chunk(struct scrub_dev *sdev, | 
| Arne Jansen | 859acaf | 2012-02-09 15:09:02 +0100 | [diff] [blame] | 2006 | u64 chunk_tree, u64 chunk_objectid, u64 chunk_offset, u64 length, | 
|  | 2007 | u64 dev_offset) | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2008 | { | 
|  | 2009 | struct btrfs_mapping_tree *map_tree = | 
|  | 2010 | &sdev->dev->dev_root->fs_info->mapping_tree; | 
|  | 2011 | struct map_lookup *map; | 
|  | 2012 | struct extent_map *em; | 
|  | 2013 | int i; | 
|  | 2014 | int ret = -EINVAL; | 
|  | 2015 |  | 
|  | 2016 | read_lock(&map_tree->map_tree.lock); | 
|  | 2017 | em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1); | 
|  | 2018 | read_unlock(&map_tree->map_tree.lock); | 
|  | 2019 |  | 
|  | 2020 | if (!em) | 
|  | 2021 | return -EINVAL; | 
|  | 2022 |  | 
|  | 2023 | map = (struct map_lookup *)em->bdev; | 
|  | 2024 | if (em->start != chunk_offset) | 
|  | 2025 | goto out; | 
|  | 2026 |  | 
|  | 2027 | if (em->len < length) | 
|  | 2028 | goto out; | 
|  | 2029 |  | 
|  | 2030 | for (i = 0; i < map->num_stripes; ++i) { | 
| Arne Jansen | 859acaf | 2012-02-09 15:09:02 +0100 | [diff] [blame] | 2031 | if (map->stripes[i].dev == sdev->dev && | 
|  | 2032 | map->stripes[i].physical == dev_offset) { | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2033 | ret = scrub_stripe(sdev, map, i, chunk_offset, length); | 
|  | 2034 | if (ret) | 
|  | 2035 | goto out; | 
|  | 2036 | } | 
|  | 2037 | } | 
|  | 2038 | out: | 
|  | 2039 | free_extent_map(em); | 
|  | 2040 |  | 
|  | 2041 | return ret; | 
|  | 2042 | } | 
|  | 2043 |  | 
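|  |  | /* | 
|  |  |  * walk all dev extents of the device between @start and @end and scrub | 
|  |  |  * the corresponding chunks; a block group reference is held to keep | 
|  |  |  * each chunk from being removed while it is scrubbed. | 
|  |  |  */ | 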
|  | 2044 | static noinline_for_stack | 
|  | 2045 | int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end) | 
|  | 2046 | { | 
|  | 2047 | struct btrfs_dev_extent *dev_extent = NULL; | 
|  | 2048 | struct btrfs_path *path; | 
|  | 2049 | struct btrfs_root *root = sdev->dev->dev_root; | 
|  | 2050 | struct btrfs_fs_info *fs_info = root->fs_info; | 
|  | 2051 | u64 length; | 
|  | 2052 | u64 chunk_tree; | 
|  | 2053 | u64 chunk_objectid; | 
|  | 2054 | u64 chunk_offset; | 
|  | 2055 | int ret; | 
|  | 2056 | int slot; | 
|  | 2057 | struct extent_buffer *l; | 
|  | 2058 | struct btrfs_key key; | 
|  | 2059 | struct btrfs_key found_key; | 
|  | 2060 | struct btrfs_block_group_cache *cache; | 
|  | 2061 |  | 
|  | 2062 | path = btrfs_alloc_path(); | 
|  | 2063 | if (!path) | 
|  | 2064 | return -ENOMEM; | 
|  | 2065 |  | 
|  | 2066 | path->reada = 2; | 
|  | 2067 | path->search_commit_root = 1; | 
|  | 2068 | path->skip_locking = 1; | 
|  | 2069 |  | 
|  | 2070 | key.objectid = sdev->dev->devid; | 
|  | 2071 | key.offset = 0ull; | 
|  | 2072 | key.type = BTRFS_DEV_EXTENT_KEY; | 
|  | 2073 |  | 
|  | 2074 |  | 
|  | 2075 | while (1) { | 
|  | 2076 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | 
|  | 2077 | if (ret < 0) | 
| Arne Jansen | 8c51032 | 2011-06-03 10:09:26 +0200 | [diff] [blame] | 2078 | break; | 
|  | 2079 | if (ret > 0) { | 
|  | 2080 | if (path->slots[0] >= | 
|  | 2081 | btrfs_header_nritems(path->nodes[0])) { | 
|  | 2082 | ret = btrfs_next_leaf(root, path); | 
|  | 2083 | if (ret) | 
|  | 2084 | break; | 
|  | 2085 | } | 
|  | 2086 | } | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2087 |  | 
|  | 2088 | l = path->nodes[0]; | 
|  | 2089 | slot = path->slots[0]; | 
|  | 2090 |  | 
|  | 2091 | btrfs_item_key_to_cpu(l, &found_key, slot); | 
|  | 2092 |  | 
|  | 2093 | if (found_key.objectid != sdev->dev->devid) | 
|  | 2094 | break; | 
|  | 2095 |  | 
| Arne Jansen | 8c51032 | 2011-06-03 10:09:26 +0200 | [diff] [blame] | 2096 | if (btrfs_key_type(&found_key) != BTRFS_DEV_EXTENT_KEY) | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2097 | break; | 
|  | 2098 |  | 
|  | 2099 | if (found_key.offset >= end) | 
|  | 2100 | break; | 
|  | 2101 |  | 
|  | 2102 | if (found_key.offset < key.offset) | 
|  | 2103 | break; | 
|  | 2104 |  | 
|  | 2105 | dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); | 
|  | 2106 | length = btrfs_dev_extent_length(l, dev_extent); | 
|  | 2107 |  | 
|  | 2108 | if (found_key.offset + length <= start) { | 
|  | 2109 | key.offset = found_key.offset + length; | 
| Chris Mason | 7126733 | 2011-05-23 06:30:52 -0400 | [diff] [blame] | 2110 | btrfs_release_path(path); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2111 | continue; | 
|  | 2112 | } | 
|  | 2113 |  | 
|  | 2114 | chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent); | 
|  | 2115 | chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent); | 
|  | 2116 | chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent); | 
|  | 2117 |  | 
|  | 2118 | /* | 
|  | 2119 | * get a reference on the corresponding block group to prevent | 
|  | 2120 | * the chunk from going away while we scrub it | 
|  | 2121 | */ | 
|  | 2122 | cache = btrfs_lookup_block_group(fs_info, chunk_offset); | 
|  | 2123 | if (!cache) { | 
|  | 2124 | ret = -ENOENT; | 
| Arne Jansen | 8c51032 | 2011-06-03 10:09:26 +0200 | [diff] [blame] | 2125 | break; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2126 | } | 
|  | 2127 | ret = scrub_chunk(sdev, chunk_tree, chunk_objectid, | 
| Arne Jansen | 859acaf | 2012-02-09 15:09:02 +0100 | [diff] [blame] | 2128 | chunk_offset, length, found_key.offset); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2129 | btrfs_put_block_group(cache); | 
|  | 2130 | if (ret) | 
|  | 2131 | break; | 
|  | 2132 |  | 
|  | 2133 | key.offset = found_key.offset + length; | 
| Chris Mason | 7126733 | 2011-05-23 06:30:52 -0400 | [diff] [blame] | 2134 | btrfs_release_path(path); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2135 | } | 
|  | 2136 |  | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2137 | btrfs_free_path(path); | 
| Arne Jansen | 8c51032 | 2011-06-03 10:09:26 +0200 | [diff] [blame] | 2138 |  | 
|  | 2139 | /* | 
|  | 2140 | * ret can still be 1 from search_slot or next_leaf, | 
|  | 2141 | * that's not an error | 
|  | 2142 | */ | 
|  | 2143 | return ret < 0 ? ret : 0; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2144 | } | 
|  | 2145 |  | 
|  | 2146 | static noinline_for_stack int scrub_supers(struct scrub_dev *sdev) | 
|  | 2147 | { | 
|  | 2148 | int	i; | 
|  | 2149 | u64	bytenr; | 
|  | 2150 | u64	gen; | 
|  | 2151 | int	ret; | 
|  | 2152 | struct btrfs_device *device = sdev->dev; | 
|  | 2153 | struct btrfs_root *root = device->dev_root; | 
|  | 2154 |  | 
| Jeff Mahoney | 79787ea | 2012-03-12 16:03:00 +0100 | [diff] [blame] | 2155 | if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) | 
|  | 2156 | return -EIO; | 
|  | 2157 |  | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2158 | gen = root->fs_info->last_trans_committed; | 
|  | 2159 |  | 
|  | 2160 | for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { | 
|  | 2161 | bytenr = btrfs_sb_offset(i); | 
| Stefan Behrens | 1623ede | 2012-03-27 14:21:26 -0400 | [diff] [blame] | 2162 | if (bytenr + BTRFS_SUPER_INFO_SIZE > device->total_bytes) | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2163 | break; | 
|  | 2164 |  | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2165 | ret = scrub_pages(sdev, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr, | 
|  | 2166 | BTRFS_EXTENT_FLAG_SUPER, gen, i, NULL, 1); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2167 | if (ret) | 
|  | 2168 | return ret; | 
|  | 2169 | } | 
|  | 2170 | wait_event(sdev->list_wait, atomic_read(&sdev->in_flight) == 0); | 
|  | 2171 |  | 
|  | 2172 | return 0; | 
|  | 2173 | } | 
|  | 2174 |  | 
|  | 2175 | /* | 
|  | 2176 | * get a reference count on fs_info->scrub_workers. Start the workers if necessary. | 
|  | 2177 | */ | 
|  | 2178 | static noinline_for_stack int scrub_workers_get(struct btrfs_root *root) | 
|  | 2179 | { | 
|  | 2180 | struct btrfs_fs_info *fs_info = root->fs_info; | 
| Josef Bacik | 0dc3b84 | 2011-11-18 14:37:27 -0500 | [diff] [blame] | 2181 | int ret = 0; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2182 |  | 
|  | 2183 | mutex_lock(&fs_info->scrub_lock); | 
| Arne Jansen | 632dd77 | 2011-06-10 12:07:07 +0200 | [diff] [blame] | 2184 | if (fs_info->scrub_workers_refcnt == 0) { | 
|  | 2185 | btrfs_init_workers(&fs_info->scrub_workers, "scrub", | 
|  | 2186 | fs_info->thread_pool_size, &fs_info->generic_worker); | 
|  | 2187 | fs_info->scrub_workers.idle_thresh = 4; | 
| Josef Bacik | 0dc3b84 | 2011-11-18 14:37:27 -0500 | [diff] [blame] | 2188 | ret = btrfs_start_workers(&fs_info->scrub_workers); | 
|  | 2189 | if (ret) | 
|  | 2190 | goto out; | 
| Arne Jansen | 632dd77 | 2011-06-10 12:07:07 +0200 | [diff] [blame] | 2191 | } | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2192 | ++fs_info->scrub_workers_refcnt; | 
| Josef Bacik | 0dc3b84 | 2011-11-18 14:37:27 -0500 | [diff] [blame] | 2193 | out: | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2194 | mutex_unlock(&fs_info->scrub_lock); | 
|  | 2195 |  | 
| Josef Bacik | 0dc3b84 | 2011-11-18 14:37:27 -0500 | [diff] [blame] | 2196 | return ret; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2197 | } | 
|  | 2198 |  | 
|  | 2199 | static noinline_for_stack void scrub_workers_put(struct btrfs_root *root) | 
|  | 2200 | { | 
|  | 2201 | struct btrfs_fs_info *fs_info = root->fs_info; | 
|  | 2202 |  | 
|  | 2203 | mutex_lock(&fs_info->scrub_lock); | 
|  | 2204 | if (--fs_info->scrub_workers_refcnt == 0) | 
|  | 2205 | btrfs_stop_workers(&fs_info->scrub_workers); | 
|  | 2206 | WARN_ON(fs_info->scrub_workers_refcnt < 0); | 
|  | 2207 | mutex_unlock(&fs_info->scrub_lock); | 
|  | 2208 | } | 
|  | 2209 |  | 
|  | 2210 |  | 
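|  |  | /* | 
|  |  |  * entry point for scrubbing a single device: check the size assumptions, | 
|  |  |  * set up the per-device scrub context, scrub the super blocks and all | 
|  |  |  * chunks, wait for outstanding I/O and fixups, and copy the statistics | 
|  |  |  * to @progress. | 
|  |  |  */ | 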
|  | 2211 | int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end, | 
| Arne Jansen | 8628764 | 2011-03-23 16:34:19 +0100 | [diff] [blame] | 2212 | struct btrfs_scrub_progress *progress, int readonly) | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2213 | { | 
|  | 2214 | struct scrub_dev *sdev; | 
|  | 2215 | struct btrfs_fs_info *fs_info = root->fs_info; | 
|  | 2216 | int ret; | 
|  | 2217 | struct btrfs_device *dev; | 
|  | 2218 |  | 
| David Sterba | 7841cb2 | 2011-05-31 18:07:27 +0200 | [diff] [blame] | 2219 | if (btrfs_fs_closing(root->fs_info)) | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2220 | return -EINVAL; | 
|  | 2221 |  | 
|  | 2222 | /* | 
|  | 2223 | * check some assumptions | 
|  | 2224 | */ | 
| Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2225 | if (root->nodesize != root->leafsize) { | 
|  | 2226 | printk(KERN_ERR | 
|  | 2227 | "btrfs_scrub: size assumption nodesize == leafsize (%d == %d) fails\n", | 
|  | 2228 | root->nodesize, root->leafsize); | 
|  | 2229 | return -EINVAL; | 
|  | 2230 | } | 
|  | 2231 |  | 
|  | 2232 | if (root->nodesize > BTRFS_STRIPE_LEN) { | 
|  | 2233 | /* | 
|  | 2234 | * in this case scrub is unable to calculate the checksum the way | 
|  | 2235 | * it is implemented. Do not handle this situation at all because | 
|  | 2236 | * it won't ever happen. | 
|  | 2237 | */ | 
|  | 2238 | printk(KERN_ERR | 
|  | 2239 | "btrfs_scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails\n", | 
|  | 2240 | root->nodesize, BTRFS_STRIPE_LEN); | 
|  | 2241 | return -EINVAL; | 
|  | 2242 | } | 
|  | 2243 |  | 
|  | 2244 | if (root->sectorsize != PAGE_SIZE) { | 
|  | 2245 | /* not supported for data w/o checksums */ | 
|  | 2246 | printk(KERN_ERR | 
|  | 2247 | "btrfs_scrub: size assumption sectorsize != PAGE_SIZE (%d != %lld) fails\n", | 
|  | 2248 | root->sectorsize, (unsigned long long)PAGE_SIZE); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2249 | return -EINVAL; | 
|  | 2250 | } | 
|  | 2251 |  | 
|  | 2252 | ret = scrub_workers_get(root); | 
|  | 2253 | if (ret) | 
|  | 2254 | return ret; | 
|  | 2255 |  | 
|  | 2256 | mutex_lock(&root->fs_info->fs_devices->device_list_mutex); | 
|  | 2257 | dev = btrfs_find_device(root, devid, NULL, NULL); | 
|  | 2258 | if (!dev || dev->missing) { | 
|  | 2259 | mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); | 
|  | 2260 | scrub_workers_put(root); | 
|  | 2261 | return -ENODEV; | 
|  | 2262 | } | 
|  | 2263 | mutex_lock(&fs_info->scrub_lock); | 
|  | 2264 |  | 
|  | 2265 | if (!dev->in_fs_metadata) { | 
|  | 2266 | mutex_unlock(&fs_info->scrub_lock); | 
|  | 2267 | mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); | 
|  | 2268 | scrub_workers_put(root); | 
|  | 2269 | return -ENODEV; | 
|  | 2270 | } | 
|  | 2271 |  | 
|  | 2272 | if (dev->scrub_device) { | 
|  | 2273 | mutex_unlock(&fs_info->scrub_lock); | 
|  | 2274 | mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); | 
|  | 2275 | scrub_workers_put(root); | 
|  | 2276 | return -EINPROGRESS; | 
|  | 2277 | } | 
|  | 2278 | sdev = scrub_setup_dev(dev); | 
|  | 2279 | if (IS_ERR(sdev)) { | 
|  | 2280 | mutex_unlock(&fs_info->scrub_lock); | 
|  | 2281 | mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); | 
|  | 2282 | scrub_workers_put(root); | 
|  | 2283 | return PTR_ERR(sdev); | 
|  | 2284 | } | 
| Arne Jansen | 8628764 | 2011-03-23 16:34:19 +0100 | [diff] [blame] | 2285 | sdev->readonly = readonly; | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2286 | dev->scrub_device = sdev; | 
|  | 2287 |  | 
|  | 2288 | atomic_inc(&fs_info->scrubs_running); | 
|  | 2289 | mutex_unlock(&fs_info->scrub_lock); | 
|  | 2290 | mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); | 
|  | 2291 |  | 
|  | 2292 | down_read(&fs_info->scrub_super_lock); | 
|  | 2293 | ret = scrub_supers(sdev); | 
|  | 2294 | up_read(&fs_info->scrub_super_lock); | 
|  | 2295 |  | 
|  | 2296 | if (!ret) | 
|  | 2297 | ret = scrub_enumerate_chunks(sdev, start, end); | 
|  | 2298 |  | 
|  | 2299 | wait_event(sdev->list_wait, atomic_read(&sdev->in_flight) == 0); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2300 | atomic_dec(&fs_info->scrubs_running); | 
|  | 2301 | wake_up(&fs_info->scrub_pause_wait); | 
|  | 2302 |  | 
| Jan Schmidt | 0ef8e45 | 2011-06-13 20:04:15 +0200 | [diff] [blame] | 2303 | wait_event(sdev->list_wait, atomic_read(&sdev->fixup_cnt) == 0); | 
|  | 2304 |  | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2305 | if (progress) | 
|  | 2306 | memcpy(progress, &sdev->stat, sizeof(*progress)); | 
|  | 2307 |  | 
|  | 2308 | mutex_lock(&fs_info->scrub_lock); | 
|  | 2309 | dev->scrub_device = NULL; | 
|  | 2310 | mutex_unlock(&fs_info->scrub_lock); | 
|  | 2311 |  | 
|  | 2312 | scrub_free_dev(sdev); | 
|  | 2313 | scrub_workers_put(root); | 
|  | 2314 |  | 
|  | 2315 | return ret; | 
|  | 2316 | } | 
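For orientation, the following is a minimal sketch of how an ioctl-style caller might drive btrfs_scrub_dev() and hand the accumulated statistics back to user space. The layout of struct btrfs_ioctl_scrub_args, the BTRFS_SCRUB_READONLY flag and the function name are assumptions for illustration; they are not defined in this file.

/*
 * Hypothetical caller sketch (not part of this file): start a scrub on one
 * device and copy the final statistics back to user space.  The argument
 * structure and the readonly flag are assumed here for illustration only.
 */
static long example_scrub_ioctl(struct btrfs_root *root, void __user *arg)
{
	struct btrfs_ioctl_scrub_args *sa;
	int ret;

	sa = memdup_user(arg, sizeof(*sa));
	if (IS_ERR(sa))
		return PTR_ERR(sa);

	/* blocks until the scrub finishes, is cancelled or fails */
	ret = btrfs_scrub_dev(root, sa->devid, sa->start, sa->end,
			      &sa->progress, sa->flags & BTRFS_SCRUB_READONLY);

	if (copy_to_user(arg, sa, sizeof(*sa)))
		ret = -EFAULT;

	kfree(sa);
	return ret;
}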
|  | 2317 |  | 
| Jeff Mahoney | 143bede | 2012-03-01 14:56:26 +0100 | [diff] [blame] | 2318 | void btrfs_scrub_pause(struct btrfs_root *root) | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2319 | { | 
|  | 2320 | struct btrfs_fs_info *fs_info = root->fs_info; | 
|  | 2321 |  | 
|  | 2322 | mutex_lock(&fs_info->scrub_lock); | 
|  | 2323 | atomic_inc(&fs_info->scrub_pause_req); | 
|  | 2324 | while (atomic_read(&fs_info->scrubs_paused) != | 
|  | 2325 | atomic_read(&fs_info->scrubs_running)) { | 
|  | 2326 | mutex_unlock(&fs_info->scrub_lock); | 
|  | 2327 | wait_event(fs_info->scrub_pause_wait, | 
|  | 2328 | atomic_read(&fs_info->scrubs_paused) == | 
|  | 2329 | atomic_read(&fs_info->scrubs_running)); | 
|  | 2330 | mutex_lock(&fs_info->scrub_lock); | 
|  | 2331 | } | 
|  | 2332 | mutex_unlock(&fs_info->scrub_lock); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2333 | } | 
|  | 2334 |  | 
| Jeff Mahoney | 143bede | 2012-03-01 14:56:26 +0100 | [diff] [blame] | 2335 | void btrfs_scrub_continue(struct btrfs_root *root) | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2336 | { | 
|  | 2337 | struct btrfs_fs_info *fs_info = root->fs_info; | 
|  | 2338 |  | 
|  | 2339 | atomic_dec(&fs_info->scrub_pause_req); | 
|  | 2340 | wake_up(&fs_info->scrub_pause_wait); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2341 | } | 
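btrfs_scrub_pause()/btrfs_scrub_continue() are meant to bracket work that must not run concurrently with a scrub, for example around parts of a transaction commit. A minimal usage sketch follows; the surrounding logic is assumed, not taken from this file.

/*
 * Illustrative only: park all running scrubs, do work that must be
 * exclusive with them, then let them resume.
 */
static void example_exclusive_section(struct btrfs_root *root)
{
	btrfs_scrub_pause(root);	/* returns once every scrub has paused */

	/* ... work that must not race with scrub ... */

	btrfs_scrub_continue(root);	/* wake the paused scrubs back up */
}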
|  | 2342 |  | 
| Jeff Mahoney | 143bede | 2012-03-01 14:56:26 +0100 | [diff] [blame] | 2343 | void btrfs_scrub_pause_super(struct btrfs_root *root) | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2344 | { | 
|  | 2345 | down_write(&root->fs_info->scrub_super_lock); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2346 | } | 
|  | 2347 |  | 
| Jeff Mahoney | 143bede | 2012-03-01 14:56:26 +0100 | [diff] [blame] | 2348 | void btrfs_scrub_continue_super(struct btrfs_root *root) | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2349 | { | 
|  | 2350 | up_write(&root->fs_info->scrub_super_lock); | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2351 | } | 
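The *_super variants take scrub_super_lock for writing, so a writer of super blocks can exclude the scrub_supers() call that btrfs_scrub_dev() makes under the read side of that lock. A hedged sketch of that bracket; the super-block update itself is assumed:

/*
 * Illustrative only: keep scrub away from the super blocks while they are
 * being rewritten.
 */
static void example_update_supers(struct btrfs_root *root)
{
	btrfs_scrub_pause_super(root);	/* takes scrub_super_lock for writing */

	/* ... rewrite the super block copies ... */

	btrfs_scrub_continue_super(root);
}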
|  | 2352 |  | 
| Jeff Mahoney | 49b25e0 | 2012-03-01 17:24:58 +0100 | [diff] [blame] | 2353 | int __btrfs_scrub_cancel(struct btrfs_fs_info *fs_info) | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2354 | { | 
|  | 2356 | mutex_lock(&fs_info->scrub_lock); | 
|  | 2357 | if (!atomic_read(&fs_info->scrubs_running)) { | 
|  | 2358 | mutex_unlock(&fs_info->scrub_lock); | 
|  | 2359 | return -ENOTCONN; | 
|  | 2360 | } | 
|  | 2361 |  | 
|  | 2362 | atomic_inc(&fs_info->scrub_cancel_req); | 
|  | 2363 | while (atomic_read(&fs_info->scrubs_running)) { | 
|  | 2364 | mutex_unlock(&fs_info->scrub_lock); | 
|  | 2365 | wait_event(fs_info->scrub_pause_wait, | 
|  | 2366 | atomic_read(&fs_info->scrubs_running) == 0); | 
|  | 2367 | mutex_lock(&fs_info->scrub_lock); | 
|  | 2368 | } | 
|  | 2369 | atomic_dec(&fs_info->scrub_cancel_req); | 
|  | 2370 | mutex_unlock(&fs_info->scrub_lock); | 
|  | 2371 |  | 
|  | 2372 | return 0; | 
|  | 2373 | } | 
|  | 2374 |  | 
| Jeff Mahoney | 49b25e0 | 2012-03-01 17:24:58 +0100 | [diff] [blame] | 2375 | int btrfs_scrub_cancel(struct btrfs_root *root) | 
|  | 2376 | { | 
|  | 2377 | return __btrfs_scrub_cancel(root->fs_info); | 
|  | 2378 | } | 
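Cancellation comes in an fs_info flavour and a root-based wrapper. A teardown or error path would typically call it unconditionally and treat -ENOTCONN ("no scrub was running") as success; a hedged sketch:

/*
 * Illustrative only: stop any running scrub before tearing the filesystem
 * down.  -ENOTCONN just means nothing was in flight.
 */
static void example_cancel_on_shutdown(struct btrfs_fs_info *fs_info)
{
	int ret = __btrfs_scrub_cancel(fs_info);

	WARN_ON(ret && ret != -ENOTCONN);
}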
|  | 2379 |  | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2380 | int btrfs_scrub_cancel_dev(struct btrfs_root *root, struct btrfs_device *dev) | 
|  | 2381 | { | 
|  | 2382 | struct btrfs_fs_info *fs_info = root->fs_info; | 
|  | 2383 | struct scrub_dev *sdev; | 
|  | 2384 |  | 
|  | 2385 | mutex_lock(&fs_info->scrub_lock); | 
|  | 2386 | sdev = dev->scrub_device; | 
|  | 2387 | if (!sdev) { | 
|  | 2388 | mutex_unlock(&fs_info->scrub_lock); | 
|  | 2389 | return -ENOTCONN; | 
|  | 2390 | } | 
|  | 2391 | atomic_inc(&sdev->cancel_req); | 
|  | 2392 | while (dev->scrub_device) { | 
|  | 2393 | mutex_unlock(&fs_info->scrub_lock); | 
|  | 2394 | wait_event(fs_info->scrub_pause_wait, | 
|  | 2395 | dev->scrub_device == NULL); | 
|  | 2396 | mutex_lock(&fs_info->scrub_lock); | 
|  | 2397 | } | 
|  | 2398 | mutex_unlock(&fs_info->scrub_lock); | 
|  | 2399 |  | 
|  | 2400 | return 0; | 
|  | 2401 | } | 
| Stefan Behrens | 1623ede | 2012-03-27 14:21:26 -0400 | [diff] [blame] | 2402 |  | 
| Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2403 | int btrfs_scrub_cancel_devid(struct btrfs_root *root, u64 devid) | 
|  | 2404 | { | 
|  | 2405 | struct btrfs_fs_info *fs_info = root->fs_info; | 
|  | 2406 | struct btrfs_device *dev; | 
|  | 2407 | int ret; | 
|  | 2408 |  | 
|  | 2409 | /* | 
|  | 2410 | * we have to hold the device_list_mutex here so the device | 
|  | 2411 | * does not go away in cancel_dev. FIXME: find a better solution | 
|  | 2412 | */ | 
|  | 2413 | mutex_lock(&fs_info->fs_devices->device_list_mutex); | 
|  | 2414 | dev = btrfs_find_device(root, devid, NULL, NULL); | 
|  | 2415 | if (!dev) { | 
|  | 2416 | mutex_unlock(&fs_info->fs_devices->device_list_mutex); | 
|  | 2417 | return -ENODEV; | 
|  | 2418 | } | 
|  | 2419 | ret = btrfs_scrub_cancel_dev(root, dev); | 
|  | 2420 | mutex_unlock(&fs_info->fs_devices->device_list_mutex); | 
|  | 2421 |  | 
|  | 2422 | return ret; | 
|  | 2423 | } | 
|  | 2424 |  | 
|  | 2425 | int btrfs_scrub_progress(struct btrfs_root *root, u64 devid, | 
|  | 2426 | struct btrfs_scrub_progress *progress) | 
|  | 2427 | { | 
|  | 2428 | struct btrfs_device *dev; | 
|  | 2429 | struct scrub_dev *sdev = NULL; | 
|  | 2430 |  | 
|  | 2431 | mutex_lock(&root->fs_info->fs_devices->device_list_mutex); | 
|  | 2432 | dev = btrfs_find_device(root, devid, NULL, NULL); | 
|  | 2433 | if (dev) | 
|  | 2434 | sdev = dev->scrub_device; | 
|  | 2435 | if (sdev) | 
|  | 2436 | memcpy(progress, &sdev->stat, sizeof(*progress)); | 
|  | 2437 | mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); | 
|  | 2438 |  | 
|  | 2439 | return dev ? (sdev ? 0 : -ENOTCONN) : -ENODEV; | 
|  | 2440 | } |
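Finally, a hedged sketch of how a status query might use btrfs_scrub_progress(). The data_bytes_scrubbed field comes from struct btrfs_scrub_progress; the function name and reporting style are assumptions for illustration.

/*
 * Illustrative only: report how much data has been scrubbed on one device.
 * -ENOTCONN means the device exists but has no scrub attached to it,
 * -ENODEV means the devid is unknown.
 */
static void example_report_progress(struct btrfs_root *root, u64 devid)
{
	struct btrfs_scrub_progress prog;
	int ret;

	ret = btrfs_scrub_progress(root, devid, &prog);
	if (!ret)
		printk(KERN_INFO "btrfs: devid %llu: %llu data bytes scrubbed\n",
		       (unsigned long long)devid,
		       (unsigned long long)prog.data_bytes_scrubbed);
	else if (ret == -ENOTCONN)
		printk(KERN_INFO "btrfs: no scrub running on devid %llu\n",
		       (unsigned long long)devid);
}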