/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "transaction.h"
#include "dev-replace.h"

#undef DEBUG

/*
 * This is the implementation for the generic read ahead framework.
 *
 * To trigger a readahead, btrfs_reada_add must be called. It will start
 * a read ahead for the given range [start, end) on tree root. The returned
 * handle can either be used to wait on the readahead to finish
 * (btrfs_reada_wait), or to send it to the background (btrfs_reada_detach).
 *
 * The read ahead works as follows:
 * On btrfs_reada_add, the root of the tree is inserted into a radix_tree.
 * reada_start_machine will then search for extents to prefetch and trigger
 * some reads. When a read finishes for a node, all contained node/leaf
 * pointers that lie in the given range will also be enqueued. The reads will
 * be triggered in sequential order, thus giving a big win over a naive
 * enumeration. It will also make use of multi-device layouts. Each disk
 * will have its own read pointer and all disks will be utilized in parallel.
 * Also, no two disks will read both sides of a mirror simultaneously, as this
 * would waste seeking capacity. Instead both disks will read different parts
 * of the filesystem.
 * Any number of readaheads can be started in parallel. The read order will be
 * determined globally, i.e. 2 parallel readaheads will normally finish faster
 * than 2 started one after another.
 */
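
/*
 * Example usage, as a minimal sketch: root, key_start and key_end stand
 * in for the caller's tree root and the key range to prefetch.
 *
 *	struct reada_control *rc;
 *
 *	rc = btrfs_reada_add(root, &key_start, &key_end);
 *	if (IS_ERR(rc))
 *		return PTR_ERR(rc);
 *	btrfs_reada_wait(rc);
 *
 * Calling btrfs_reada_detach(rc) instead of btrfs_reada_wait(rc) would
 * drop the handle and let the readahead finish in the background.
 */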

#define MAX_IN_FLIGHT 6

struct reada_extctl {
	struct list_head list;
	struct reada_control *rc;
	u64 generation;
};

struct reada_extent {
	u64 logical;
	struct btrfs_key top;
	u32 blocksize;
	int err;
	struct list_head extctl;
	int refcnt;
	spinlock_t lock;
	struct reada_zone *zones[BTRFS_MAX_MIRRORS];
	int nzones;
	struct btrfs_device *scheduled_for;
};

struct reada_zone {
	u64 start;
	u64 end;
	u64 elems;
	struct list_head list;
	spinlock_t lock;
	int locked;
	struct btrfs_device *device;
	struct btrfs_device *devs[BTRFS_MAX_MIRRORS]; /* full list, incl
						       * self */
	int ndevs;
	struct kref refcnt;
};

struct reada_machine_work {
	struct btrfs_work work;
	struct btrfs_fs_info *fs_info;
};
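
/*
 * Indexing note (derived from the lookups below): fs_info->reada_tree and
 * each device's reada_extents tree are keyed by the extent's logical start
 * right-shifted by PAGE_CACHE_SHIFT, while a device's reada_zones tree is
 * keyed by the zone's *end*, so a gang lookup starting at any logical
 * address inside a zone returns that zone.
 */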

static void reada_extent_put(struct btrfs_fs_info *, struct reada_extent *);
static void reada_control_release(struct kref *kref);
static void reada_zone_release(struct kref *kref);
static void reada_start_machine(struct btrfs_fs_info *fs_info);
static void __reada_start_machine(struct btrfs_fs_info *fs_info);

static int reada_add_block(struct reada_control *rc, u64 logical,
			   struct btrfs_key *top, int level, u64 generation);

/* recurses */
/* in case of err, eb might be NULL */
static int __readahead_hook(struct btrfs_root *root, struct extent_buffer *eb,
			    u64 start, int err)
{
	int level = 0;
	int nritems;
	int i;
	u64 bytenr;
	u64 generation;
	struct reada_extent *re;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct list_head list;
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	struct btrfs_device *for_dev;

	if (eb)
		level = btrfs_header_level(eb);

	/* find extent */
	spin_lock(&fs_info->reada_lock);
	re = radix_tree_lookup(&fs_info->reada_tree, index);
	if (re)
		re->refcnt++;
	spin_unlock(&fs_info->reada_lock);

	if (!re)
		return -1;

	spin_lock(&re->lock);
	/*
	 * just take the full list from the extent. afterwards we
	 * don't need the lock anymore
	 */
	list_replace_init(&re->extctl, &list);
	for_dev = re->scheduled_for;
	re->scheduled_for = NULL;
	spin_unlock(&re->lock);

	if (err == 0) {
		nritems = level ? btrfs_header_nritems(eb) : 0;
		generation = btrfs_header_generation(eb);
		/*
		 * FIXME: currently we just set nritems to 0 if this is a leaf,
		 * effectively ignoring the content. In a next step we could
		 * trigger more readahead depending on the content, e.g.
		 * fetch the checksums for the extents in the leaf.
		 */
	} else {
		/*
		 * this is the error case, the extent buffer has not been
		 * read correctly. We won't access anything from it and
		 * just cleanup our data structures. Effectively this will
		 * cut the branch below this node from read ahead.
		 */
		nritems = 0;
		generation = 0;
	}

	for (i = 0; i < nritems; i++) {
		struct reada_extctl *rec;
		u64 n_gen;
		struct btrfs_key key;
		struct btrfs_key next_key;

		btrfs_node_key_to_cpu(eb, &key, i);
		if (i + 1 < nritems)
			btrfs_node_key_to_cpu(eb, &next_key, i + 1);
		else
			next_key = re->top;
		bytenr = btrfs_node_blockptr(eb, i);
		n_gen = btrfs_node_ptr_generation(eb, i);

		list_for_each_entry(rec, &list, list) {
			struct reada_control *rc = rec->rc;

			/*
			 * if the generation doesn't match, just ignore this
			 * extctl. This will probably cut off a branch from
			 * prefetch. Alternatively one could start a new (sub-)
			 * prefetch for this branch, starting again from root.
			 * FIXME: move the generation check out of this loop
			 */
#ifdef DEBUG
			if (rec->generation != generation) {
				printk(KERN_DEBUG "generation mismatch for "
						"(%llu,%d,%llu) %llu != %llu\n",
				       key.objectid, key.type, key.offset,
				       rec->generation, generation);
			}
#endif
			if (rec->generation == generation &&
			    btrfs_comp_cpu_keys(&key, &rc->key_end) < 0 &&
			    btrfs_comp_cpu_keys(&next_key, &rc->key_start) > 0)
				reada_add_block(rc, bytenr, &next_key,
						level - 1, n_gen);
		}
	}
	/*
	 * free extctl records
	 */
	while (!list_empty(&list)) {
		struct reada_control *rc;
		struct reada_extctl *rec;

		rec = list_first_entry(&list, struct reada_extctl, list);
		list_del(&rec->list);
		rc = rec->rc;
		kfree(rec);

		kref_get(&rc->refcnt);
		if (atomic_dec_and_test(&rc->elems)) {
			kref_put(&rc->refcnt, reada_control_release);
			wake_up(&rc->wait);
		}
		kref_put(&rc->refcnt, reada_control_release);

		reada_extent_put(fs_info, re);	/* one ref for each entry */
	}
	reada_extent_put(fs_info, re);	/* our ref */
	if (for_dev)
		atomic_dec(&for_dev->reada_in_flight);

	return 0;
}

/*
 * start is passed separately in case eb is NULL, which may be the case with
 * failed I/O
 */
int btree_readahead_hook(struct btrfs_root *root, struct extent_buffer *eb,
			 u64 start, int err)
{
	int ret;

	ret = __readahead_hook(root, eb, start, err);

	reada_start_machine(root->fs_info);

	return ret;
}

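/*
 * Find the zone that covers @logical on @dev, creating it from the block
 * group boundaries if none exists yet. The returned zone carries a kref
 * for the caller; NULL is returned on allocation or lookup failure.
 */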
static struct reada_zone *reada_find_zone(struct btrfs_fs_info *fs_info,
					  struct btrfs_device *dev, u64 logical,
					  struct btrfs_bio *bbio)
{
	int ret;
	struct reada_zone *zone;
	struct btrfs_block_group_cache *cache = NULL;
	u64 start;
	u64 end;
	int i;

	zone = NULL;
	spin_lock(&fs_info->reada_lock);
	ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
				     logical >> PAGE_CACHE_SHIFT, 1);
	if (ret == 1)
		kref_get(&zone->refcnt);
	spin_unlock(&fs_info->reada_lock);

	if (ret == 1) {
		if (logical >= zone->start && logical < zone->end)
			return zone;
		spin_lock(&fs_info->reada_lock);
		kref_put(&zone->refcnt, reada_zone_release);
		spin_unlock(&fs_info->reada_lock);
	}

	cache = btrfs_lookup_block_group(fs_info, logical);
	if (!cache)
		return NULL;

	start = cache->key.objectid;
	end = start + cache->key.offset - 1;
	btrfs_put_block_group(cache);

	zone = kzalloc(sizeof(*zone), GFP_NOFS);
	if (!zone)
		return NULL;

	zone->start = start;
	zone->end = end;
	INIT_LIST_HEAD(&zone->list);
	spin_lock_init(&zone->lock);
	zone->locked = 0;
	kref_init(&zone->refcnt);
	zone->elems = 0;
	zone->device = dev; /* our device always sits at index 0 */
	for (i = 0; i < bbio->num_stripes; ++i) {
		/* bounds have already been checked */
		zone->devs[i] = bbio->stripes[i].dev;
	}
	zone->ndevs = bbio->num_stripes;

	spin_lock(&fs_info->reada_lock);
	ret = radix_tree_insert(&dev->reada_zones,
				(unsigned long)(zone->end >> PAGE_CACHE_SHIFT),
				zone);

	if (ret == -EEXIST) {
		kfree(zone);
		ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
					     logical >> PAGE_CACHE_SHIFT, 1);
		if (ret == 1)
			kref_get(&zone->refcnt);
	}
	spin_unlock(&fs_info->reada_lock);

	return zone;
}

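/*
 * Look up or create the reada_extent for @logical. On success the extent
 * is returned with one reference held for the caller, either found in the
 * radix trees or freshly mapped and inserted into them.
 */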
static struct reada_extent *reada_find_extent(struct btrfs_root *root,
					      u64 logical,
					      struct btrfs_key *top, int level)
{
	int ret;
	struct reada_extent *re = NULL;
	struct reada_extent *re_exist = NULL;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_bio *bbio = NULL;
	struct btrfs_device *dev;
	struct btrfs_device *prev_dev;
	u32 blocksize;
	u64 length;
	int nzones = 0;
	int i;
	unsigned long index = logical >> PAGE_CACHE_SHIFT;
	int dev_replace_is_ongoing;

	spin_lock(&fs_info->reada_lock);
	re = radix_tree_lookup(&fs_info->reada_tree, index);
	if (re)
		re->refcnt++;
	spin_unlock(&fs_info->reada_lock);

	if (re)
		return re;

	re = kzalloc(sizeof(*re), GFP_NOFS);
	if (!re)
		return NULL;

	blocksize = btrfs_level_size(root, level);
	re->logical = logical;
	re->blocksize = blocksize;
	re->top = *top;
	INIT_LIST_HEAD(&re->extctl);
	spin_lock_init(&re->lock);
	re->refcnt = 1;

	/*
	 * map block
	 */
	length = blocksize;
	ret = btrfs_map_block(fs_info, REQ_WRITE, logical, &length, &bbio, 0);
	if (ret || !bbio || length < blocksize)
		goto error;

	if (bbio->num_stripes > BTRFS_MAX_MIRRORS) {
		printk(KERN_ERR "btrfs readahead: more than %d copies not "
				"supported", BTRFS_MAX_MIRRORS);
		goto error;
	}

	for (nzones = 0; nzones < bbio->num_stripes; ++nzones) {
		struct reada_zone *zone;

		dev = bbio->stripes[nzones].dev;
		zone = reada_find_zone(fs_info, dev, logical, bbio);
		if (!zone)
			break;

		re->zones[nzones] = zone;
		spin_lock(&zone->lock);
		if (!zone->elems)
			kref_get(&zone->refcnt);
		++zone->elems;
		spin_unlock(&zone->lock);
		spin_lock(&fs_info->reada_lock);
		kref_put(&zone->refcnt, reada_zone_release);
		spin_unlock(&fs_info->reada_lock);
	}
	re->nzones = nzones;
	if (nzones == 0) {
		/* not a single zone found, error and out */
		goto error;
	}

	/* insert extent in reada_tree + all per-device trees, all or nothing */
	btrfs_dev_replace_lock(&fs_info->dev_replace);
	spin_lock(&fs_info->reada_lock);
	ret = radix_tree_insert(&fs_info->reada_tree, index, re);
	if (ret == -EEXIST) {
		re_exist = radix_tree_lookup(&fs_info->reada_tree, index);
		BUG_ON(!re_exist);
		re_exist->refcnt++;
		spin_unlock(&fs_info->reada_lock);
		btrfs_dev_replace_unlock(&fs_info->dev_replace);
		goto error;
	}
	if (ret) {
		spin_unlock(&fs_info->reada_lock);
		btrfs_dev_replace_unlock(&fs_info->dev_replace);
		goto error;
	}
	prev_dev = NULL;
	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(
			&fs_info->dev_replace);
	for (i = 0; i < nzones; ++i) {
		dev = bbio->stripes[i].dev;
		if (dev == prev_dev) {
			/*
			 * in case of DUP, just add the first zone. As both
			 * are on the same device, there's nothing to gain
			 * from adding both.
			 * Also, it wouldn't work, as the tree is per device
			 * and adding would fail with EEXIST
			 */
			continue;
		}
		if (!dev->bdev) {
			/* cannot read ahead on missing device */
			continue;
		}
		if (dev_replace_is_ongoing &&
		    dev == fs_info->dev_replace.tgtdev) {
			/*
			 * as this device is selected for reading only as
			 * a last resort, skip it for read ahead.
			 */
			continue;
		}
		prev_dev = dev;
		ret = radix_tree_insert(&dev->reada_extents, index, re);
		if (ret) {
			while (--i >= 0) {
				dev = bbio->stripes[i].dev;
				BUG_ON(dev == NULL);
				/* ignore whether the entry was inserted */
				radix_tree_delete(&dev->reada_extents, index);
			}
			BUG_ON(fs_info == NULL);
			radix_tree_delete(&fs_info->reada_tree, index);
			spin_unlock(&fs_info->reada_lock);
			btrfs_dev_replace_unlock(&fs_info->dev_replace);
			goto error;
		}
	}
	spin_unlock(&fs_info->reada_lock);
	btrfs_dev_replace_unlock(&fs_info->dev_replace);

	kfree(bbio);
	return re;

error:
	while (nzones) {
		struct reada_zone *zone;

		--nzones;
		zone = re->zones[nzones];
		kref_get(&zone->refcnt);
		spin_lock(&zone->lock);
		--zone->elems;
		if (zone->elems == 0) {
			/*
			 * no fs_info->reada_lock needed, as this can't be
			 * the last ref
			 */
			kref_put(&zone->refcnt, reada_zone_release);
		}
		spin_unlock(&zone->lock);

		spin_lock(&fs_info->reada_lock);
		kref_put(&zone->refcnt, reada_zone_release);
		spin_unlock(&fs_info->reada_lock);
	}
	kfree(bbio);
	kfree(re);
	return re_exist;
}

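/*
 * Drop one reference on @re. On the last put, the extent is removed from
 * the global and per-device radix trees, its zones are released and the
 * extent itself is freed.
 */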
static void reada_extent_put(struct btrfs_fs_info *fs_info,
			     struct reada_extent *re)
{
	int i;
	unsigned long index = re->logical >> PAGE_CACHE_SHIFT;

	spin_lock(&fs_info->reada_lock);
	if (--re->refcnt) {
		spin_unlock(&fs_info->reada_lock);
		return;
	}

	radix_tree_delete(&fs_info->reada_tree, index);
	for (i = 0; i < re->nzones; ++i) {
		struct reada_zone *zone = re->zones[i];

		radix_tree_delete(&zone->device->reada_extents, index);
	}

	spin_unlock(&fs_info->reada_lock);

	for (i = 0; i < re->nzones; ++i) {
		struct reada_zone *zone = re->zones[i];

		kref_get(&zone->refcnt);
		spin_lock(&zone->lock);
		--zone->elems;
		if (zone->elems == 0) {
			/* no fs_info->reada_lock needed, as this can't be
			 * the last ref */
			kref_put(&zone->refcnt, reada_zone_release);
		}
		spin_unlock(&zone->lock);

		spin_lock(&fs_info->reada_lock);
		kref_put(&zone->refcnt, reada_zone_release);
		spin_unlock(&fs_info->reada_lock);
	}
	if (re->scheduled_for)
		atomic_dec(&re->scheduled_for->reada_in_flight);

	kfree(re);
}

static void reada_zone_release(struct kref *kref)
{
	struct reada_zone *zone = container_of(kref, struct reada_zone, refcnt);

	radix_tree_delete(&zone->device->reada_zones,
			  zone->end >> PAGE_CACHE_SHIFT);

	kfree(zone);
}

static void reada_control_release(struct kref *kref)
{
	struct reada_control *rc = container_of(kref, struct reada_control,
						refcnt);

	kfree(rc);
}

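/*
 * Queue one tree block for @rc: look up or create its reada_extent and
 * hang a reada_extctl record off it. The reference taken by
 * reada_find_extent intentionally stays on the extent until it is read.
 */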
static int reada_add_block(struct reada_control *rc, u64 logical,
			   struct btrfs_key *top, int level, u64 generation)
{
	struct btrfs_root *root = rc->root;
	struct reada_extent *re;
	struct reada_extctl *rec;

	re = reada_find_extent(root, logical, top, level); /* takes one ref */
	if (!re)
		return -1;

	rec = kzalloc(sizeof(*rec), GFP_NOFS);
	if (!rec) {
		reada_extent_put(root->fs_info, re);
		return -1;
	}

	rec->rc = rc;
	rec->generation = generation;
	atomic_inc(&rc->elems);

	spin_lock(&re->lock);
	list_add_tail(&rec->list, &re->extctl);
	spin_unlock(&re->lock);

	/* leave the ref on the extent */

	return 0;
}

/*
 * called with fs_info->reada_lock held
 */
static void reada_peer_zones_set_lock(struct reada_zone *zone, int lock)
{
	int i;
	unsigned long index = zone->end >> PAGE_CACHE_SHIFT;

	for (i = 0; i < zone->ndevs; ++i) {
		struct reada_zone *peer;
		peer = radix_tree_lookup(&zone->devs[i]->reada_zones, index);
		if (peer && peer->device != zone->device)
			peer->locked = lock;
	}
}

/*
 * called with fs_info->reada_lock held
 */
static int reada_pick_zone(struct btrfs_device *dev)
{
	struct reada_zone *top_zone = NULL;
	struct reada_zone *top_locked_zone = NULL;
	u64 top_elems = 0;
	u64 top_locked_elems = 0;
	unsigned long index = 0;
	int ret;

	if (dev->reada_curr_zone) {
		reada_peer_zones_set_lock(dev->reada_curr_zone, 0);
		kref_put(&dev->reada_curr_zone->refcnt, reada_zone_release);
		dev->reada_curr_zone = NULL;
	}
	/* pick the zone with the most elements */
	while (1) {
		struct reada_zone *zone;

		ret = radix_tree_gang_lookup(&dev->reada_zones,
					     (void **)&zone, index, 1);
		if (ret == 0)
			break;
		index = (zone->end >> PAGE_CACHE_SHIFT) + 1;
		if (zone->locked) {
			if (zone->elems > top_locked_elems) {
				top_locked_elems = zone->elems;
				top_locked_zone = zone;
			}
		} else {
			if (zone->elems > top_elems) {
				top_elems = zone->elems;
				top_zone = zone;
			}
		}
	}
	if (top_zone)
		dev->reada_curr_zone = top_zone;
	else if (top_locked_zone)
		dev->reada_curr_zone = top_locked_zone;
	else
		return 0;

	dev->reada_next = dev->reada_curr_zone->start;
	kref_get(&dev->reada_curr_zone->refcnt);
	reada_peer_zones_set_lock(dev->reada_curr_zone, 1);

	return 1;
}

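/*
 * Issue the next read for @dev: pick (or re-pick) the current zone, take
 * the first extent at or after dev->reada_next and hand it to
 * reada_tree_block_flagged. Returns 1 if a read was triggered, 0 if not.
 */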
static int reada_start_machine_dev(struct btrfs_fs_info *fs_info,
				   struct btrfs_device *dev)
{
	struct reada_extent *re = NULL;
	int mirror_num = 0;
	struct extent_buffer *eb = NULL;
	u64 logical;
	u32 blocksize;
	int ret;
	int i;
	int need_kick = 0;

	spin_lock(&fs_info->reada_lock);
	if (dev->reada_curr_zone == NULL) {
		ret = reada_pick_zone(dev);
		if (!ret) {
			spin_unlock(&fs_info->reada_lock);
			return 0;
		}
	}
	/*
	 * FIXME currently we issue the reads one extent at a time. If we have
	 * a contiguous block of extents, we could also coalesce them or use
	 * plugging to speed things up
	 */
	ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
				     dev->reada_next >> PAGE_CACHE_SHIFT, 1);
	if (ret == 0 || re->logical >= dev->reada_curr_zone->end) {
		ret = reada_pick_zone(dev);
		if (!ret) {
			spin_unlock(&fs_info->reada_lock);
			return 0;
		}
		re = NULL;
		ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
					dev->reada_next >> PAGE_CACHE_SHIFT, 1);
	}
	if (ret == 0) {
		spin_unlock(&fs_info->reada_lock);
		return 0;
	}
	dev->reada_next = re->logical + re->blocksize;
	re->refcnt++;

	spin_unlock(&fs_info->reada_lock);

	/*
	 * find mirror num
	 */
	for (i = 0; i < re->nzones; ++i) {
		if (re->zones[i]->device == dev) {
			mirror_num = i + 1;
			break;
		}
	}
	logical = re->logical;
	blocksize = re->blocksize;

	spin_lock(&re->lock);
	if (re->scheduled_for == NULL) {
		re->scheduled_for = dev;
		need_kick = 1;
	}
	spin_unlock(&re->lock);

	reada_extent_put(fs_info, re);

	if (!need_kick)
		return 0;

	atomic_inc(&dev->reada_in_flight);
	ret = reada_tree_block_flagged(fs_info->extent_root, logical, blocksize,
				       mirror_num, &eb);
	if (ret)
		__readahead_hook(fs_info->extent_root, NULL, logical, ret);
	else if (eb)
		__readahead_hook(fs_info->extent_root, eb, eb->start, ret);

	if (eb)
		free_extent_buffer(eb);

	return 1;
}

static void reada_start_machine_worker(struct btrfs_work *work)
{
	struct reada_machine_work *rmw;
	struct btrfs_fs_info *fs_info;
	int old_ioprio;

	rmw = container_of(work, struct reada_machine_work, work);
	fs_info = rmw->fs_info;

	kfree(rmw);

	old_ioprio = IOPRIO_PRIO_VALUE(task_nice_ioclass(current),
				       task_nice_ioprio(current));
	set_task_ioprio(current, BTRFS_IOPRIO_READA);
	__reada_start_machine(fs_info);
	set_task_ioprio(current, old_ioprio);
}

static void __reada_start_machine(struct btrfs_fs_info *fs_info)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	u64 enqueued;
	u64 total = 0;
	int i;

	do {
		enqueued = 0;
		list_for_each_entry(device, &fs_devices->devices, dev_list) {
			if (atomic_read(&device->reada_in_flight) <
			    MAX_IN_FLIGHT)
				enqueued += reada_start_machine_dev(fs_info,
								    device);
		}
		total += enqueued;
	} while (enqueued && total < 10000);

	if (enqueued == 0)
		return;

	/*
	 * If everything is already in the cache, this is effectively single
	 * threaded. To a) not hold the caller for too long and b) to utilize
	 * more cores, we broke the loop above after 10000 iterations and now
	 * enqueue to workers to finish it. This will distribute the load to
	 * the cores.
	 */
	for (i = 0; i < 2; ++i)
		reada_start_machine(fs_info);
}

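/* hand the state machine off to the readahead worker pool */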
static void reada_start_machine(struct btrfs_fs_info *fs_info)
{
	struct reada_machine_work *rmw;

	rmw = kzalloc(sizeof(*rmw), GFP_NOFS);
	if (!rmw) {
		/* FIXME we cannot handle this properly right now */
		BUG();
	}
	rmw->work.func = reada_start_machine_worker;
	rmw->fs_info = fs_info;

	btrfs_queue_worker(&fs_info->readahead_workers, &rmw->work);
}

#ifdef DEBUG
static void dump_devs(struct btrfs_fs_info *fs_info, int all)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	unsigned long index;
	int ret;
	int i;
	int j;
	int cnt;

	spin_lock(&fs_info->reada_lock);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		printk(KERN_DEBUG "dev %lld has %d in flight\n", device->devid,
			atomic_read(&device->reada_in_flight));
		index = 0;
		while (1) {
			struct reada_zone *zone;
			ret = radix_tree_gang_lookup(&device->reada_zones,
						     (void **)&zone, index, 1);
			if (ret == 0)
				break;
			printk(KERN_DEBUG "  zone %llu-%llu elems %llu locked "
				"%d devs", zone->start, zone->end, zone->elems,
				zone->locked);
			for (j = 0; j < zone->ndevs; ++j) {
				printk(KERN_CONT " %lld",
					zone->devs[j]->devid);
			}
			if (device->reada_curr_zone == zone)
				printk(KERN_CONT " curr off %llu",
					device->reada_next - zone->start);
			printk(KERN_CONT "\n");
			index = (zone->end >> PAGE_CACHE_SHIFT) + 1;
		}
		cnt = 0;
		index = 0;
		while (all) {
			struct reada_extent *re = NULL;

			ret = radix_tree_gang_lookup(&device->reada_extents,
						     (void **)&re, index, 1);
			if (ret == 0)
				break;
			printk(KERN_DEBUG
				"  re: logical %llu size %u empty %d for %lld",
				re->logical, re->blocksize,
				list_empty(&re->extctl), re->scheduled_for ?
				re->scheduled_for->devid : -1);

			for (i = 0; i < re->nzones; ++i) {
				printk(KERN_CONT " zone %llu-%llu devs",
					re->zones[i]->start,
					re->zones[i]->end);
				for (j = 0; j < re->zones[i]->ndevs; ++j) {
					printk(KERN_CONT " %lld",
						re->zones[i]->devs[j]->devid);
				}
			}
			printk(KERN_CONT "\n");
			index = (re->logical >> PAGE_CACHE_SHIFT) + 1;
			if (++cnt > 15)
				break;
		}
	}

	index = 0;
	cnt = 0;
	while (all) {
		struct reada_extent *re = NULL;

		ret = radix_tree_gang_lookup(&fs_info->reada_tree, (void **)&re,
					     index, 1);
		if (ret == 0)
			break;
		if (!re->scheduled_for) {
			index = (re->logical >> PAGE_CACHE_SHIFT) + 1;
			continue;
		}
		printk(KERN_DEBUG
			"re: logical %llu size %u list empty %d for %lld",
			re->logical, re->blocksize, list_empty(&re->extctl),
			re->scheduled_for ? re->scheduled_for->devid : -1);
		for (i = 0; i < re->nzones; ++i) {
			printk(KERN_CONT " zone %llu-%llu devs",
				re->zones[i]->start,
				re->zones[i]->end);
			for (j = 0; j < re->zones[i]->ndevs; ++j) {
				printk(KERN_CONT " %lld",
					re->zones[i]->devs[j]->devid);
			}
		}
		printk(KERN_CONT "\n");
		index = (re->logical >> PAGE_CACHE_SHIFT) + 1;
	}
	spin_unlock(&fs_info->reada_lock);
}
#endif

/*
 * interface
 */
struct reada_control *btrfs_reada_add(struct btrfs_root *root,
			struct btrfs_key *key_start, struct btrfs_key *key_end)
{
	struct reada_control *rc;
	u64 start;
	u64 generation;
	int level;
	struct extent_buffer *node;
	static struct btrfs_key max_key = {
		.objectid = (u64)-1,
		.type = (u8)-1,
		.offset = (u64)-1
	};

	rc = kzalloc(sizeof(*rc), GFP_NOFS);
	if (!rc)
		return ERR_PTR(-ENOMEM);

	rc->root = root;
	rc->key_start = *key_start;
	rc->key_end = *key_end;
	atomic_set(&rc->elems, 0);
	init_waitqueue_head(&rc->wait);
	kref_init(&rc->refcnt);
	kref_get(&rc->refcnt); /* one ref for having elements */

	node = btrfs_root_node(root);
	start = node->start;
	level = btrfs_header_level(node);
	generation = btrfs_header_generation(node);
	free_extent_buffer(node);

	if (reada_add_block(rc, start, &max_key, level, generation)) {
		kfree(rc);
		return ERR_PTR(-ENOMEM);
	}

	reada_start_machine(root->fs_info);

	return rc;
}

#ifdef DEBUG
int btrfs_reada_wait(void *handle)
{
	struct reada_control *rc = handle;

	while (atomic_read(&rc->elems)) {
		wait_event_timeout(rc->wait, atomic_read(&rc->elems) == 0,
				   5 * HZ);
		dump_devs(rc->root->fs_info,
			  atomic_read(&rc->elems) < 10 ? 1 : 0);
	}

	dump_devs(rc->root->fs_info, atomic_read(&rc->elems) < 10 ? 1 : 0);

	kref_put(&rc->refcnt, reada_control_release);

	return 0;
}
#else
int btrfs_reada_wait(void *handle)
{
	struct reada_control *rc = handle;

	while (atomic_read(&rc->elems)) {
		wait_event(rc->wait, atomic_read(&rc->elems) == 0);
	}

	kref_put(&rc->refcnt, reada_control_release);

	return 0;
}
#endif

void btrfs_reada_detach(void *handle)
{
	struct reada_control *rc = handle;

	kref_put(&rc->refcnt, reada_control_release);
}