#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/prefetch.h>
#include <linux/cleancache.h>
#include "extent_io.h"
#include "extent_map.h"
#include "compat.h"
#include "ctree.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "check-integrity.h"
#include "locking.h"

static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;

static LIST_HEAD(buffers);
static LIST_HEAD(states);

#define LEAK_DEBUG 0
#if LEAK_DEBUG
static DEFINE_SPINLOCK(leak_lock);
#endif

#define BUFFER_LRU_MAX 64

struct tree_entry {
	u64 start;
	u64 end;
	struct rb_node rb_node;
};

struct extent_page_data {
	struct bio *bio;
	struct extent_io_tree *tree;
	get_extent_t *get_extent;

	/* tells writepage not to lock the state bits for this range
	 * it still does the unlocking
	 */
	unsigned int extent_locked:1;

	/* tells the submit_bio code to use a WRITE_SYNC */
	unsigned int sync_io:1;
};

static noinline void flush_write_bio(void *data);
static inline struct btrfs_fs_info *
tree_fs_info(struct extent_io_tree *tree)
{
	return btrfs_sb(tree->mapping->host->i_sb);
}

int __init extent_io_init(void)
{
	extent_state_cache = kmem_cache_create("extent_state",
			sizeof(struct extent_state), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!extent_state_cache)
		return -ENOMEM;

	extent_buffer_cache = kmem_cache_create("extent_buffers",
			sizeof(struct extent_buffer), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!extent_buffer_cache)
		goto free_state_cache;
	return 0;

free_state_cache:
	kmem_cache_destroy(extent_state_cache);
	return -ENOMEM;
}

void extent_io_exit(void)
{
	struct extent_state *state;
	struct extent_buffer *eb;

	while (!list_empty(&states)) {
		state = list_entry(states.next, struct extent_state, leak_list);
		printk(KERN_ERR "btrfs state leak: start %llu end %llu "
		       "state %lu in tree %p refs %d\n",
		       (unsigned long long)state->start,
		       (unsigned long long)state->end,
		       state->state, state->tree, atomic_read(&state->refs));
		list_del(&state->leak_list);
		kmem_cache_free(extent_state_cache, state);

	}

	while (!list_empty(&buffers)) {
		eb = list_entry(buffers.next, struct extent_buffer, leak_list);
		printk(KERN_ERR "btrfs buffer leak start %llu len %lu "
		       "refs %d\n", (unsigned long long)eb->start,
		       eb->len, atomic_read(&eb->refs));
		list_del(&eb->leak_list);
		kmem_cache_free(extent_buffer_cache, eb);
	}
	if (extent_state_cache)
		kmem_cache_destroy(extent_state_cache);
	if (extent_buffer_cache)
		kmem_cache_destroy(extent_buffer_cache);
}

void extent_io_tree_init(struct extent_io_tree *tree,
			 struct address_space *mapping)
{
	tree->state = RB_ROOT;
	INIT_RADIX_TREE(&tree->buffer, GFP_ATOMIC);
	tree->ops = NULL;
	tree->dirty_bytes = 0;
	spin_lock_init(&tree->lock);
	spin_lock_init(&tree->buffer_lock);
	tree->mapping = mapping;
}

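/*
 * allocate an extent_state from the slab cache with a single reference
 * held.  Returns NULL if the allocation fails for the given gfp mask.
 */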
static struct extent_state *alloc_extent_state(gfp_t mask)
{
	struct extent_state *state;
#if LEAK_DEBUG
	unsigned long flags;
#endif

	state = kmem_cache_alloc(extent_state_cache, mask);
	if (!state)
		return state;
	state->state = 0;
	state->private = 0;
	state->tree = NULL;
#if LEAK_DEBUG
	spin_lock_irqsave(&leak_lock, flags);
	list_add(&state->leak_list, &states);
	spin_unlock_irqrestore(&leak_lock, flags);
#endif
	atomic_set(&state->refs, 1);
	init_waitqueue_head(&state->wq);
	trace_alloc_extent_state(state, mask, _RET_IP_);
	return state;
}

void free_extent_state(struct extent_state *state)
{
	if (!state)
		return;
	if (atomic_dec_and_test(&state->refs)) {
#if LEAK_DEBUG
		unsigned long flags;
#endif
		WARN_ON(state->tree);
#if LEAK_DEBUG
		spin_lock_irqsave(&leak_lock, flags);
		list_del(&state->leak_list);
		spin_unlock_irqrestore(&leak_lock, flags);
#endif
		trace_free_extent_state(state, _RET_IP_);
		kmem_cache_free(extent_state_cache, state);
	}
}

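/*
 * insert 'node' into the rb tree rooted at 'root', keyed by the end offset
 * of the range.  Returns the existing node if one already covers 'offset',
 * or NULL once the new node has been linked and rebalanced.
 */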
static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset > entry->end)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	entry = rb_entry(node, struct tree_entry, rb_node);
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

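/*
 * search the tree for an entry that contains 'offset'.  If there is no
 * exact match, *prev_ret is set to the first entry that ends at or after
 * 'offset' and *next_ret to the last entry that starts at or before it
 * (either may be NULL if no such neighbour exists).
 */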
static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
				      struct rb_node **prev_ret,
				      struct rb_node **next_ret)
{
	struct rb_root *root = &tree->state;
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct tree_entry *entry;
	struct tree_entry *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct tree_entry, rb_node);
		prev = n;
		prev_entry = entry;

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset > entry->end)
			n = n->rb_right;
		else
			return n;
	}

	if (prev_ret) {
		orig_prev = prev;
		while (prev && offset > prev_entry->end) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

	if (next_ret) {
		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*next_ret = prev;
	}
	return NULL;
}

static inline struct rb_node *tree_search(struct extent_io_tree *tree,
					  u64 offset)
{
	struct rb_node *prev = NULL;
	struct rb_node *ret;

	ret = __etree_search(tree, offset, &prev, NULL);
	if (!ret)
		return prev;
	return ret;
}

static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
		     struct extent_state *other)
{
	if (tree->ops && tree->ops->merge_extent_hook)
		tree->ops->merge_extent_hook(tree->mapping->host, new,
					     other);
}

/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree.  Extents with EXTENT_IO in their state field
 * are not merged because the end_io handlers need to be able to do
 * operations on them without sleeping (or doing allocations/splits).
 *
 * This should be called with the tree lock held.
 */
static void merge_state(struct extent_io_tree *tree,
			struct extent_state *state)
{
	struct extent_state *other;
	struct rb_node *other_node;

	if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
		return;

	other_node = rb_prev(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->end == state->start - 1 &&
		    other->state == state->state) {
			merge_cb(tree, state, other);
			state->start = other->start;
			other->tree = NULL;
			rb_erase(&other->rb_node, &tree->state);
			free_extent_state(other);
		}
	}
	other_node = rb_next(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->start == state->end + 1 &&
		    other->state == state->state) {
			merge_cb(tree, state, other);
			state->end = other->end;
			other->tree = NULL;
			rb_erase(&other->rb_node, &tree->state);
			free_extent_state(other);
		}
	}
}

static void set_state_cb(struct extent_io_tree *tree,
			 struct extent_state *state, int *bits)
{
	if (tree->ops && tree->ops->set_bit_hook)
		tree->ops->set_bit_hook(tree->mapping->host, state, bits);
}

static void clear_state_cb(struct extent_io_tree *tree,
			   struct extent_state *state, int *bits)
{
	if (tree->ops && tree->ops->clear_bit_hook)
		tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
}

static void set_state_bits(struct extent_io_tree *tree,
			   struct extent_state *state, int *bits);

/*
 * insert an extent_state struct into the tree.  'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case the
 * state struct is freed.
 *
 * The tree lock is not taken internally.  This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_io_tree *tree,
			struct extent_state *state, u64 start, u64 end,
			int *bits)
{
	struct rb_node *node;

	if (end < start) {
		printk(KERN_ERR "btrfs end < start %llu %llu\n",
		       (unsigned long long)end,
		       (unsigned long long)start);
		WARN_ON(1);
	}
	state->start = start;
	state->end = end;

	set_state_bits(tree, state, bits);

	node = tree_insert(&tree->state, end, &state->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk(KERN_ERR "btrfs found node %llu %llu on insert of "
		       "%llu %llu\n", (unsigned long long)found->start,
		       (unsigned long long)found->end,
		       (unsigned long long)start, (unsigned long long)end);
		return -EEXIST;
	}
	state->tree = tree;
	merge_state(tree, state);
	return 0;
}

static void split_cb(struct extent_io_tree *tree, struct extent_state *orig,
		     u64 split)
{
	if (tree->ops && tree->ops->split_extent_hook)
		tree->ops->split_extent_hook(tree->mapping->host, orig, split);
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created second half.  'split' indicates an
 * offset inside 'orig' where it should be split.
 *
 * Before calling,
 * the tree has 'orig' at [orig->start, orig->end].  After calling, there
 * are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig: [ split, orig->end ]
 *
 * The tree locks are not taken by this function. They need to be held
 * by the caller.
 */
static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
		       struct extent_state *prealloc, u64 split)
{
	struct rb_node *node;

	split_cb(tree, orig, split);

	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->state = orig->state;
	orig->start = split;

	node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
	if (node) {
		free_extent_state(prealloc);
		return -EEXIST;
	}
	prealloc->tree = tree;
	return 0;
}

static struct extent_state *next_state(struct extent_state *state)
{
	struct rb_node *next = rb_next(&state->rb_node);
	if (next)
		return rb_entry(next, struct extent_state, rb_node);
	else
		return NULL;
}

/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up any one waiting on this state (wake == 1)
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree
 */
static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
					    struct extent_state *state,
					    int *bits, int wake)
{
	struct extent_state *next;
	int bits_to_clear = *bits & ~EXTENT_CTLBITS;

	if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		WARN_ON(range > tree->dirty_bytes);
		tree->dirty_bytes -= range;
	}
	clear_state_cb(tree, state, bits);
	state->state &= ~bits_to_clear;
	if (wake)
		wake_up(&state->wq);
	if (state->state == 0) {
		next = next_state(state);
		if (state->tree) {
			rb_erase(&state->rb_node, &tree->state);
			state->tree = NULL;
			free_extent_state(state);
		} else {
			WARN_ON(1);
		}
	} else {
		merge_state(tree, state);
		next = next_state(state);
	}
	return next;
}

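/*
 * make sure a preallocated extent_state exists, allocating one with
 * GFP_ATOMIC if 'prealloc' is NULL.  May still return NULL under memory
 * pressure.
 */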
static struct extent_state *
alloc_extent_state_atomic(struct extent_state *prealloc)
{
	if (!prealloc)
		prealloc = alloc_extent_state(GFP_ATOMIC);

	return prealloc;
}

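/*
 * the extent io tree is expected to stay consistent while the tree lock
 * is held; if a tree operation fails anyway, hand the error to
 * btrfs_panic().
 */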
void extent_io_tree_panic(struct extent_io_tree *tree, int err)
{
	btrfs_panic(tree_fs_info(tree), err, "Locking error: "
		    "Extent tree was modified by another "
		    "thread while locked.");
}

/*
 * clear some bits on a range in the tree.  This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (ie for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns 0 on success and < 0 on error.
 */
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		     int bits, int wake, int delete,
		     struct extent_state **cached_state,
		     gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *cached;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	u64 last_end;
	int err;
	int clear = 0;

	if (delete)
		bits |= ~EXTENT_CTLBITS;
	bits |= EXTENT_FIRST_DELALLOC;

	if (bits & (EXTENT_IOBITS | EXTENT_BOUNDARY))
		clear = 1;
again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	spin_lock(&tree->lock);
	if (cached_state) {
		cached = *cached_state;

		if (clear) {
			*cached_state = NULL;
			cached_state = NULL;
		}

		if (cached && cached->tree && cached->start <= start &&
		    cached->end > start) {
			if (clear)
				atomic_dec(&cached->refs);
			state = cached;
			goto hit_next;
		}
		if (clear)
			free_extent_state(cached);
	}
	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	if (state->start > end)
		goto out;
	WARN_ON(state->end < start);
	last_end = state->end;

	/* the state doesn't have the wanted bits, go ahead */
	if (!(state->state & bits)) {
		state = next_state(state);
		goto next;
	}

	/*
	 * | ---- desired range ---- |
	 *  | state | or
	 *  | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip
	 * bits on second half.
	 *
	 * If the extent we found extends past our range, we
	 * just split and search again.  It'll get split again
	 * the next time though.
	 *
	 * If the extent we found is inside our range, we clear
	 * the desired bit on it.
	 */

	if (state->start < start) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, start);
		if (err)
			extent_io_tree_panic(tree, err);

		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			clear_state_bit(tree, state, &bits, wake);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and clear the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, end + 1);
		if (err)
			extent_io_tree_panic(tree, err);

		if (wake)
			wake_up(&state->wq);

		clear_state_bit(tree, prealloc, &bits, wake);

		prealloc = NULL;
		goto out;
	}

	state = clear_state_bit(tree, state, &bits, wake);
next:
	if (last_end == (u64)-1)
		goto out;
	start = last_end + 1;
	if (start <= end && state && !need_resched())
		goto hit_next;
	goto search_again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return 0;

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}

static void wait_on_state(struct extent_io_tree *tree,
			  struct extent_state *state)
		__releases(tree->lock)
		__acquires(tree->lock)
{
	DEFINE_WAIT(wait);
	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
	spin_unlock(&tree->lock);
	schedule();
	spin_lock(&tree->lock);
	finish_wait(&state->wq, &wait);
}

/*
 * waits for one or more bits to clear on a range in the state tree.
 * The range [start, end] is inclusive.
 * The tree lock is taken by this function
 */
void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
{
	struct extent_state *state;
	struct rb_node *node;

	spin_lock(&tree->lock);
again:
	while (1) {
		/*
		 * this search will find all the extents that end after
		 * our range starts
		 */
		node = tree_search(tree, start);
		if (!node)
			break;

		state = rb_entry(node, struct extent_state, rb_node);

		if (state->start > end)
			goto out;

		if (state->state & bits) {
			start = state->start;
			atomic_inc(&state->refs);
			wait_on_state(tree, state);
			free_extent_state(state);
			goto again;
		}
		start = state->end + 1;

		if (start > end)
			break;

		cond_resched_lock(&tree->lock);
	}
out:
	spin_unlock(&tree->lock);
}

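/*
 * set some bits on an extent_state struct, calling the set_bit hook and
 * accounting any newly dirtied bytes in tree->dirty_bytes.  Callers hold
 * the tree lock.
 */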
static void set_state_bits(struct extent_io_tree *tree,
			   struct extent_state *state,
			   int *bits)
{
	int bits_to_set = *bits & ~EXTENT_CTLBITS;

	set_state_cb(tree, state, bits);
	if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		tree->dirty_bytes += range;
	}
	state->state |= bits_to_set;
}

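/*
 * stash a reference to 'state' in *cached_ptr so later calls can skip the
 * tree search.  Only states with EXTENT_IOBITS or EXTENT_BOUNDARY set are
 * worth caching.
 */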
static void cache_state(struct extent_state *state,
			struct extent_state **cached_ptr)
{
	if (cached_ptr && !(*cached_ptr)) {
		if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY)) {
			*cached_ptr = state;
			atomic_inc(&state->refs);
		}
	}
}

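/*
 * drop the reference a previous cache_state() took and clear the caller's
 * cached pointer.
 */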
static void uncache_state(struct extent_state **cached_ptr)
{
	if (cached_ptr && (*cached_ptr)) {
		struct extent_state *state = *cached_ptr;
		*cached_ptr = NULL;
		free_extent_state(state);
	}
}

/*
 * set some bits on a range in the tree.  This may require allocations or
 * sleeping, so the gfp mask is used to indicate what is allowed.
 *
 * If any of the exclusive bits are set, this will fail with -EEXIST if some
 * part of the range already has the desired bits set.  The start of the
 * existing range is returned in failed_start in this case.
 *
 * [start, end] is inclusive.  This takes the tree lock.
 */

static int __must_check
__set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		 int bits, int exclusive_bits, u64 *failed_start,
		 struct extent_state **cached_state, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	int err = 0;
	u64 last_start;
	u64 last_end;

	bits |= EXTENT_FIRST_DELALLOC;
again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		BUG_ON(!prealloc);
	}

	spin_lock(&tree->lock);
	if (cached_state && *cached_state) {
		state = *cached_state;
		if (state->start <= start && state->end > start &&
		    state->tree) {
			node = &state->rb_node;
			goto hit_next;
		}
	}
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = insert_state(tree, prealloc, start, end, &bits);
		if (err)
			extent_io_tree_panic(tree, err);

		prealloc = NULL;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		struct rb_node *next_node;
		if (state->state & exclusive_bits) {
			*failed_start = state->start;
			err = -EEXIST;
			goto out;
		}

		set_state_bits(tree, state, &bits);

		cache_state(state, cached_state);
		merge_state(tree, state);
		if (last_end == (u64)-1)
			goto out;

		start = last_end + 1;
		next_node = rb_next(&state->rb_node);
		if (next_node && start < end && prealloc && !need_resched()) {
			state = rb_entry(next_node, struct extent_state,
					 rb_node);
			if (state->start == start)
				goto hit_next;
		}
		goto search_again;
	}

	/*
	 * | ---- desired range ---- |
	 *  | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		if (state->state & exclusive_bits) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, start);
		if (err)
			extent_io_tree_panic(tree, err);

		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set_state_bits(tree, state, &bits);
			cache_state(state, cached_state);
			merge_state(tree, state);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);

		/*
		 * Avoid freeing 'prealloc' if it can be merged with
		 * the later extent.
		 */
		err = insert_state(tree, prealloc, start, this_end,
				   &bits);
		if (err)
			extent_io_tree_panic(tree, err);

		cache_state(prealloc, cached_state);
		prealloc = NULL;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		if (state->state & exclusive_bits) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, end + 1);
		if (err)
			extent_io_tree_panic(tree, err);

		set_state_bits(tree, prealloc, &bits);
		cache_state(prealloc, cached_state);
		merge_state(tree, prealloc);
		prealloc = NULL;
		goto out;
	}

	goto search_again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return err;

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}

int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits,
		   u64 *failed_start, struct extent_state **cached_state,
		   gfp_t mask)
{
	return __set_extent_bit(tree, start, end, bits, 0, failed_start,
				cached_state, mask);
}


/**
 * convert_extent_bit - convert all bits in a given range from one bit to
 *			another
 * @tree:	the io tree to search
 * @start:	the start offset in bytes
 * @end:	the end offset in bytes (inclusive)
 * @bits:	the bits to set in this range
 * @clear_bits:	the bits to clear in this range
 * @mask:	the allocation mask
 *
 * This will go through and set bits for the given range.  If any states exist
 * already in this range they are set with the given bit and cleared of the
 * clear_bits.  This is only meant to be used by things that are mergeable, ie
 * converting from say DELALLOC to DIRTY.  This is not meant to be used with
 * boundary bits like LOCK.
 */
int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       int bits, int clear_bits, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	int err = 0;
	u64 last_start;
	u64 last_end;

again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	spin_lock(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node) {
		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}
		err = insert_state(tree, prealloc, start, end, &bits);
		prealloc = NULL;
		if (err)
			extent_io_tree_panic(tree, err);
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		struct rb_node *next_node;

		set_state_bits(tree, state, &bits);
		clear_state_bit(tree, state, &clear_bits, 0);
		if (last_end == (u64)-1)
			goto out;

		start = last_end + 1;
		next_node = rb_next(&state->rb_node);
		if (next_node && start < end && prealloc && !need_resched()) {
			state = rb_entry(next_node, struct extent_state,
					 rb_node);
			if (state->start == start)
				goto hit_next;
		}
		goto search_again;
	}

	/*
	 * | ---- desired range ---- |
	 *  | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}
		err = split_state(tree, state, prealloc, start);
		if (err)
			extent_io_tree_panic(tree, err);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set_state_bits(tree, state, &bits);
			clear_state_bit(tree, state, &clear_bits, 0);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;

		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}

		/*
		 * Avoid freeing 'prealloc' if it can be merged with
		 * the later extent.
		 */
		err = insert_state(tree, prealloc, start, this_end,
				   &bits);
		if (err)
			extent_io_tree_panic(tree, err);
		prealloc = NULL;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}

		err = split_state(tree, state, prealloc, end + 1);
		if (err)
			extent_io_tree_panic(tree, err);

		set_state_bits(tree, prealloc, &bits);
		clear_state_bit(tree, prealloc, &clear_bits, 0);
		prealloc = NULL;
		goto out;
	}

	goto search_again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return err;

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}

/* wrappers around set/clear extent bit */
int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,
			      NULL, mask);
}

int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		    int bits, gfp_t mask)
{
	return set_extent_bit(tree, start, end, bits, NULL,
			      NULL, mask);
}

int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		      int bits, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask);
}

int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
			struct extent_state **cached_state, gfp_t mask)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_UPTODATE,
			      NULL, cached_state, mask);
}

int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
		       gfp_t mask)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC |
				EXTENT_DO_ACCOUNTING, 0, 0, NULL, mask);
}

int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
		   gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, NULL,
			      NULL, mask);
}

int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
			struct extent_state **cached_state, gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0,
			      cached_state, mask);
}

static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
				 u64 end, struct extent_state **cached_state,
				 gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
				cached_state, mask);
}

/*
 * either insert or lock state struct between start and end use mask to tell
 * us if waiting is desired.
 */
int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		     int bits, struct extent_state **cached_state)
{
	int err;
	u64 failed_start;
	while (1) {
		err = __set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
				       EXTENT_LOCKED, &failed_start,
				       cached_state, GFP_NOFS);
		if (err == -EEXIST) {
			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
			start = failed_start;
		} else
			break;
		WARN_ON(start > end);
	}
	return err;
}

int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	return lock_extent_bits(tree, start, end, 0, NULL);
}

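/*
 * try to lock the range without sleeping.  Returns 1 if the lock was taken,
 * 0 if any part of the range was already locked; any EXTENT_LOCKED bits set
 * before the conflict are cleared again before returning.
 */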
Jeff Mahoneyd0082372012-03-01 14:57:19 +01001213int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
Josef Bacik25179202008-10-29 14:49:05 -04001214{
1215 int err;
1216 u64 failed_start;
1217
Jeff Mahoney3fbe5c02012-03-01 14:57:19 +01001218 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
1219 &failed_start, NULL, GFP_NOFS);
Yan Zheng66435582008-10-30 14:19:50 -04001220 if (err == -EEXIST) {
1221 if (failed_start > start)
1222 clear_extent_bit(tree, start, failed_start - 1,
Jeff Mahoneyd0082372012-03-01 14:57:19 +01001223 EXTENT_LOCKED, 1, 0, NULL, GFP_NOFS);
Josef Bacik25179202008-10-29 14:49:05 -04001224 return 0;
Yan Zheng66435582008-10-30 14:19:50 -04001225 }
Josef Bacik25179202008-10-29 14:49:05 -04001226 return 1;
1227}
Josef Bacik25179202008-10-29 14:49:05 -04001228
Chris Mason2c64c532009-09-02 15:04:12 -04001229int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
1230 struct extent_state **cached, gfp_t mask)
1231{
1232 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
1233 mask);
1234}
1235
Jeff Mahoneyd0082372012-03-01 14:57:19 +01001236int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
Chris Masond1310b22008-01-24 16:13:08 -05001237{
Chris Mason2c64c532009-09-02 15:04:12 -04001238 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
Jeff Mahoneyd0082372012-03-01 14:57:19 +01001239 GFP_NOFS);
Chris Masond1310b22008-01-24 16:13:08 -05001240}
Chris Masond1310b22008-01-24 16:13:08 -05001241
1242/*
Chris Masond1310b22008-01-24 16:13:08 -05001243 * helper function to set both pages and extents in the tree writeback
1244 */
Christoph Hellwigb2950862008-12-02 09:54:17 -05001245static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
Chris Masond1310b22008-01-24 16:13:08 -05001246{
1247 unsigned long index = start >> PAGE_CACHE_SHIFT;
1248 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1249 struct page *page;
1250
1251 while (index <= end_index) {
1252 page = find_get_page(tree->mapping, index);
Jeff Mahoney79787ea2012-03-12 16:03:00 +01001253 BUG_ON(!page); /* Pages should be in the extent_io_tree */
Chris Masond1310b22008-01-24 16:13:08 -05001254 set_page_writeback(page);
1255 page_cache_release(page);
1256 index++;
1257 }
Chris Masond1310b22008-01-24 16:13:08 -05001258 return 0;
1259}
Chris Masond1310b22008-01-24 16:13:08 -05001260
Chris Masond352ac62008-09-29 15:18:18 -04001261/* find the first state struct with 'bits' set after 'start', and
1262 * return it. tree->lock must be held. NULL will returned if
1263 * nothing was found after 'start'
1264 */
Chris Masond7fc6402008-02-18 12:12:38 -05001265struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
1266 u64 start, int bits)
1267{
1268 struct rb_node *node;
1269 struct extent_state *state;
1270
1271 /*
1272 * this search will find all the extents that end after
1273 * our range starts.
1274 */
1275 node = tree_search(tree, start);
Chris Masond3977122009-01-05 21:25:51 -05001276 if (!node)
Chris Masond7fc6402008-02-18 12:12:38 -05001277 goto out;
Chris Masond7fc6402008-02-18 12:12:38 -05001278
Chris Masond3977122009-01-05 21:25:51 -05001279 while (1) {
Chris Masond7fc6402008-02-18 12:12:38 -05001280 state = rb_entry(node, struct extent_state, rb_node);
Chris Masond3977122009-01-05 21:25:51 -05001281 if (state->end >= start && (state->state & bits))
Chris Masond7fc6402008-02-18 12:12:38 -05001282 return state;
Chris Masond3977122009-01-05 21:25:51 -05001283
Chris Masond7fc6402008-02-18 12:12:38 -05001284 node = rb_next(node);
1285 if (!node)
1286 break;
1287 }
1288out:
1289 return NULL;
1290}
Chris Masond7fc6402008-02-18 12:12:38 -05001291
Chris Masond352ac62008-09-29 15:18:18 -04001292/*
Xiao Guangrong69261c42011-07-14 03:19:45 +00001293 * find the first offset in the io tree with 'bits' set. zero is
1294 * returned if we find something, and *start_ret and *end_ret are
1295 * set to reflect the state struct that was found.
1296 *
1297 * If nothing was found, 1 is returned, < 0 on error
1298 */
1299int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
1300 u64 *start_ret, u64 *end_ret, int bits)
1301{
1302 struct extent_state *state;
1303 int ret = 1;
1304
1305 spin_lock(&tree->lock);
1306 state = find_first_extent_bit_state(tree, start, bits);
1307 if (state) {
1308 *start_ret = state->start;
1309 *end_ret = state->end;
1310 ret = 0;
1311 }
1312 spin_unlock(&tree->lock);
1313 return ret;
1314}
1315
1316/*
Chris Masond352ac62008-09-29 15:18:18 -04001317 * find a contiguous range of bytes in the file marked as delalloc, not
1318 * more than 'max_bytes'. start and end are used to return the range,
1319 *
1320 * 1 is returned if we find something, 0 if nothing was in the tree
1321 */
Chris Masonc8b97812008-10-29 14:49:59 -04001322static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
Josef Bacikc2a128d2010-02-02 21:19:11 +00001323 u64 *start, u64 *end, u64 max_bytes,
1324 struct extent_state **cached_state)
Chris Masond1310b22008-01-24 16:13:08 -05001325{
1326 struct rb_node *node;
1327 struct extent_state *state;
1328 u64 cur_start = *start;
1329 u64 found = 0;
1330 u64 total_bytes = 0;
1331
Chris Masoncad321a2008-12-17 14:51:42 -05001332 spin_lock(&tree->lock);
Chris Masonc8b97812008-10-29 14:49:59 -04001333
Chris Masond1310b22008-01-24 16:13:08 -05001334 /*
1335 * this search will find all the extents that end after
1336 * our range starts.
1337 */
Chris Mason80ea96b2008-02-01 14:51:59 -05001338 node = tree_search(tree, cur_start);
Peter2b114d12008-04-01 11:21:40 -04001339 if (!node) {
Chris Mason3b951512008-04-17 11:29:12 -04001340 if (!found)
1341 *end = (u64)-1;
Chris Masond1310b22008-01-24 16:13:08 -05001342 goto out;
1343 }
1344
Chris Masond3977122009-01-05 21:25:51 -05001345 while (1) {
Chris Masond1310b22008-01-24 16:13:08 -05001346 state = rb_entry(node, struct extent_state, rb_node);
Zheng Yan5b21f2e2008-09-26 10:05:38 -04001347 if (found && (state->start != cur_start ||
1348 (state->state & EXTENT_BOUNDARY))) {
Chris Masond1310b22008-01-24 16:13:08 -05001349 goto out;
1350 }
1351 if (!(state->state & EXTENT_DELALLOC)) {
1352 if (!found)
1353 *end = state->end;
1354 goto out;
1355 }
Josef Bacikc2a128d2010-02-02 21:19:11 +00001356 if (!found) {
Chris Masond1310b22008-01-24 16:13:08 -05001357 *start = state->start;
Josef Bacikc2a128d2010-02-02 21:19:11 +00001358 *cached_state = state;
1359 atomic_inc(&state->refs);
1360 }
Chris Masond1310b22008-01-24 16:13:08 -05001361 found++;
1362 *end = state->end;
1363 cur_start = state->end + 1;
1364 node = rb_next(node);
1365 if (!node)
1366 break;
1367 total_bytes += state->end - state->start + 1;
1368 if (total_bytes >= max_bytes)
1369 break;
1370 }
1371out:
Chris Masoncad321a2008-12-17 14:51:42 -05001372 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001373 return found;
1374}
1375
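/*
 * Added note (not in the original source): the value returned by
 * find_delalloc_range() is the number of delalloc extents accumulated,
 * not a byte count, and the walk stops early at an EXTENT_BOUNDARY so
 * the two sides are never merged into one range.  *cached_state is set
 * to the first state found, with an extra reference that the caller
 * must drop with free_extent_state().
 */
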
Jeff Mahoney143bede2012-03-01 14:56:26 +01001376static noinline void __unlock_for_delalloc(struct inode *inode,
1377 struct page *locked_page,
1378 u64 start, u64 end)
Chris Masonc8b97812008-10-29 14:49:59 -04001379{
1380 int ret;
1381 struct page *pages[16];
1382 unsigned long index = start >> PAGE_CACHE_SHIFT;
1383 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1384 unsigned long nr_pages = end_index - index + 1;
1385 int i;
1386
1387 if (index == locked_page->index && end_index == index)
Jeff Mahoney143bede2012-03-01 14:56:26 +01001388 return;
Chris Masonc8b97812008-10-29 14:49:59 -04001389
Chris Masond3977122009-01-05 21:25:51 -05001390 while (nr_pages > 0) {
Chris Masonc8b97812008-10-29 14:49:59 -04001391 ret = find_get_pages_contig(inode->i_mapping, index,
Chris Mason5b050f02008-11-11 09:34:41 -05001392 min_t(unsigned long, nr_pages,
1393 ARRAY_SIZE(pages)), pages);
Chris Masonc8b97812008-10-29 14:49:59 -04001394 for (i = 0; i < ret; i++) {
1395 if (pages[i] != locked_page)
1396 unlock_page(pages[i]);
1397 page_cache_release(pages[i]);
1398 }
1399 nr_pages -= ret;
1400 index += ret;
1401 cond_resched();
1402 }
Chris Masonc8b97812008-10-29 14:49:59 -04001403}
1404
1405static noinline int lock_delalloc_pages(struct inode *inode,
1406 struct page *locked_page,
1407 u64 delalloc_start,
1408 u64 delalloc_end)
1409{
1410 unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT;
1411 unsigned long start_index = index;
1412 unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT;
1413 unsigned long pages_locked = 0;
1414 struct page *pages[16];
1415 unsigned long nrpages;
1416 int ret;
1417 int i;
1418
1419 /* the caller is responsible for locking the start index */
1420 if (index == locked_page->index && index == end_index)
1421 return 0;
1422
1423 /* skip the page at the start index */
1424 nrpages = end_index - index + 1;
Chris Masond3977122009-01-05 21:25:51 -05001425 while (nrpages > 0) {
Chris Masonc8b97812008-10-29 14:49:59 -04001426 ret = find_get_pages_contig(inode->i_mapping, index,
Chris Mason5b050f02008-11-11 09:34:41 -05001427 min_t(unsigned long,
1428 nrpages, ARRAY_SIZE(pages)), pages);
Chris Masonc8b97812008-10-29 14:49:59 -04001429 if (ret == 0) {
1430 ret = -EAGAIN;
1431 goto done;
1432 }
1433 /* now we have an array of pages, lock them all */
1434 for (i = 0; i < ret; i++) {
1435 /*
1436 * the caller is taking responsibility for
1437 * locked_page
1438 */
Chris Mason771ed682008-11-06 22:02:51 -05001439 if (pages[i] != locked_page) {
Chris Masonc8b97812008-10-29 14:49:59 -04001440 lock_page(pages[i]);
Chris Masonf2b1c412008-11-10 07:31:30 -05001441 if (!PageDirty(pages[i]) ||
1442 pages[i]->mapping != inode->i_mapping) {
Chris Mason771ed682008-11-06 22:02:51 -05001443 ret = -EAGAIN;
1444 unlock_page(pages[i]);
1445 page_cache_release(pages[i]);
1446 goto done;
1447 }
1448 }
Chris Masonc8b97812008-10-29 14:49:59 -04001449 page_cache_release(pages[i]);
Chris Mason771ed682008-11-06 22:02:51 -05001450 pages_locked++;
Chris Masonc8b97812008-10-29 14:49:59 -04001451 }
Chris Masonc8b97812008-10-29 14:49:59 -04001452 nrpages -= ret;
1453 index += ret;
1454 cond_resched();
1455 }
1456 ret = 0;
1457done:
1458 if (ret && pages_locked) {
1459 __unlock_for_delalloc(inode, locked_page,
1460 delalloc_start,
1461 ((u64)(start_index + pages_locked - 1)) <<
1462 PAGE_CACHE_SHIFT);
1463 }
1464 return ret;
1465}
1466
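/*
 * Added note (not in the original source): lock_delalloc_pages()
 * returns -EAGAIN when a page in the range was truncated or cleaned
 * under us; the done: path has already unlocked everything that was
 * locked so far, so the caller can simply retry with a shorter range.
 */
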
1467/*
1468 * find a contiguous range of bytes in the file marked as delalloc, not
1469 * more than 'max_bytes'. start and end are used to return the range.
1470 *
1471 * 1 is returned if we find something, 0 if nothing was in the tree
1472 */
1473static noinline u64 find_lock_delalloc_range(struct inode *inode,
1474 struct extent_io_tree *tree,
1475 struct page *locked_page,
1476 u64 *start, u64 *end,
1477 u64 max_bytes)
1478{
1479 u64 delalloc_start;
1480 u64 delalloc_end;
1481 u64 found;
Chris Mason9655d292009-09-02 15:22:30 -04001482 struct extent_state *cached_state = NULL;
Chris Masonc8b97812008-10-29 14:49:59 -04001483 int ret;
1484 int loops = 0;
1485
1486again:
1487 /* step one, find a bunch of delalloc bytes starting at start */
1488 delalloc_start = *start;
1489 delalloc_end = 0;
1490 found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
Josef Bacikc2a128d2010-02-02 21:19:11 +00001491 max_bytes, &cached_state);
Chris Mason70b99e62008-10-31 12:46:39 -04001492 if (!found || delalloc_end <= *start) {
Chris Masonc8b97812008-10-29 14:49:59 -04001493 *start = delalloc_start;
1494 *end = delalloc_end;
Josef Bacikc2a128d2010-02-02 21:19:11 +00001495 free_extent_state(cached_state);
Chris Masonc8b97812008-10-29 14:49:59 -04001496 return found;
1497 }
1498
1499 /*
Chris Mason70b99e62008-10-31 12:46:39 -04001500 * start comes from the offset of locked_page. We have to lock
1501 * pages in order, so we can't process delalloc bytes before
1502 * locked_page
1503 */
Chris Masond3977122009-01-05 21:25:51 -05001504 if (delalloc_start < *start)
Chris Mason70b99e62008-10-31 12:46:39 -04001505 delalloc_start = *start;
Chris Mason70b99e62008-10-31 12:46:39 -04001506
1507 /*
Chris Masonc8b97812008-10-29 14:49:59 -04001508 * make sure to limit the number of pages we try to lock down
1509 * if we're looping.
1510 */
Chris Masond3977122009-01-05 21:25:51 -05001511 if (delalloc_end + 1 - delalloc_start > max_bytes && loops)
Chris Mason771ed682008-11-06 22:02:51 -05001512 delalloc_end = delalloc_start + PAGE_CACHE_SIZE - 1;
Chris Masond3977122009-01-05 21:25:51 -05001513
Chris Masonc8b97812008-10-29 14:49:59 -04001514 /* step two, lock all the pages after the page that has start */
1515 ret = lock_delalloc_pages(inode, locked_page,
1516 delalloc_start, delalloc_end);
1517 if (ret == -EAGAIN) {
1518 /* some of the pages are gone, let's avoid looping by
1519 * shortening the size of the delalloc range we're searching
1520 */
Chris Mason9655d292009-09-02 15:22:30 -04001521 free_extent_state(cached_state);
Chris Masonc8b97812008-10-29 14:49:59 -04001522 if (!loops) {
1523 unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1);
1524 max_bytes = PAGE_CACHE_SIZE - offset;
1525 loops = 1;
1526 goto again;
1527 } else {
1528 found = 0;
1529 goto out_failed;
1530 }
1531 }
Jeff Mahoney79787ea2012-03-12 16:03:00 +01001532 BUG_ON(ret); /* Only valid values are 0 and -EAGAIN */
Chris Masonc8b97812008-10-29 14:49:59 -04001533
1534 /* step three, lock the state bits for the whole range */
Jeff Mahoneyd0082372012-03-01 14:57:19 +01001535 lock_extent_bits(tree, delalloc_start, delalloc_end, 0, &cached_state);
Chris Masonc8b97812008-10-29 14:49:59 -04001536
1537 /* then test to make sure it is all still delalloc */
1538 ret = test_range_bit(tree, delalloc_start, delalloc_end,
Chris Mason9655d292009-09-02 15:22:30 -04001539 EXTENT_DELALLOC, 1, cached_state);
Chris Masonc8b97812008-10-29 14:49:59 -04001540 if (!ret) {
Chris Mason9655d292009-09-02 15:22:30 -04001541 unlock_extent_cached(tree, delalloc_start, delalloc_end,
1542 &cached_state, GFP_NOFS);
Chris Masonc8b97812008-10-29 14:49:59 -04001543 __unlock_for_delalloc(inode, locked_page,
1544 delalloc_start, delalloc_end);
1545 cond_resched();
1546 goto again;
1547 }
Chris Mason9655d292009-09-02 15:22:30 -04001548 free_extent_state(cached_state);
Chris Masonc8b97812008-10-29 14:49:59 -04001549 *start = delalloc_start;
1550 *end = delalloc_end;
1551out_failed:
1552 return found;
1553}
1554
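/*
 * Added summary (not in the original source): find_lock_delalloc_range()
 * runs in three steps that mirror the comments in the code above:
 *
 *	1. find_delalloc_range()  - locate candidate delalloc bytes
 *	2. lock_delalloc_pages()  - lock every page in that range
 *	3. lock_extent_bits() + test_range_bit() - confirm the range is
 *	   still delalloc now that the pages are held
 *
 * If step 3 fails, the pages and the extent are unlocked and the search
 * starts over, so callers only ever see a range that was delalloc while
 * fully locked.
 */
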
1555int extent_clear_unlock_delalloc(struct inode *inode,
1556 struct extent_io_tree *tree,
1557 u64 start, u64 end, struct page *locked_page,
Chris Masona791e352009-10-08 11:27:10 -04001558 unsigned long op)
Chris Masonc8b97812008-10-29 14:49:59 -04001559{
1560 int ret;
1561 struct page *pages[16];
1562 unsigned long index = start >> PAGE_CACHE_SHIFT;
1563 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1564 unsigned long nr_pages = end_index - index + 1;
1565 int i;
Chris Mason771ed682008-11-06 22:02:51 -05001566 int clear_bits = 0;
Chris Masonc8b97812008-10-29 14:49:59 -04001567
Chris Masona791e352009-10-08 11:27:10 -04001568 if (op & EXTENT_CLEAR_UNLOCK)
Chris Mason771ed682008-11-06 22:02:51 -05001569 clear_bits |= EXTENT_LOCKED;
Chris Masona791e352009-10-08 11:27:10 -04001570 if (op & EXTENT_CLEAR_DIRTY)
Chris Masonc8b97812008-10-29 14:49:59 -04001571 clear_bits |= EXTENT_DIRTY;
1572
Chris Masona791e352009-10-08 11:27:10 -04001573 if (op & EXTENT_CLEAR_DELALLOC)
Chris Mason771ed682008-11-06 22:02:51 -05001574 clear_bits |= EXTENT_DELALLOC;
1575
Chris Mason2c64c532009-09-02 15:04:12 -04001576 clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS);
Josef Bacik32c00af2009-10-08 13:34:05 -04001577 if (!(op & (EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
1578 EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK |
1579 EXTENT_SET_PRIVATE2)))
Chris Mason771ed682008-11-06 22:02:51 -05001580 return 0;
Chris Masonc8b97812008-10-29 14:49:59 -04001581
Chris Masond3977122009-01-05 21:25:51 -05001582 while (nr_pages > 0) {
Chris Masonc8b97812008-10-29 14:49:59 -04001583 ret = find_get_pages_contig(inode->i_mapping, index,
Chris Mason5b050f02008-11-11 09:34:41 -05001584 min_t(unsigned long,
1585 nr_pages, ARRAY_SIZE(pages)), pages);
Chris Masonc8b97812008-10-29 14:49:59 -04001586 for (i = 0; i < ret; i++) {
Chris Mason8b62b722009-09-02 16:53:46 -04001587
Chris Masona791e352009-10-08 11:27:10 -04001588 if (op & EXTENT_SET_PRIVATE2)
Chris Mason8b62b722009-09-02 16:53:46 -04001589 SetPagePrivate2(pages[i]);
1590
Chris Masonc8b97812008-10-29 14:49:59 -04001591 if (pages[i] == locked_page) {
1592 page_cache_release(pages[i]);
1593 continue;
1594 }
Chris Masona791e352009-10-08 11:27:10 -04001595 if (op & EXTENT_CLEAR_DIRTY)
Chris Masonc8b97812008-10-29 14:49:59 -04001596 clear_page_dirty_for_io(pages[i]);
Chris Masona791e352009-10-08 11:27:10 -04001597 if (op & EXTENT_SET_WRITEBACK)
Chris Masonc8b97812008-10-29 14:49:59 -04001598 set_page_writeback(pages[i]);
Chris Masona791e352009-10-08 11:27:10 -04001599 if (op & EXTENT_END_WRITEBACK)
Chris Masonc8b97812008-10-29 14:49:59 -04001600 end_page_writeback(pages[i]);
Chris Masona791e352009-10-08 11:27:10 -04001601 if (op & EXTENT_CLEAR_UNLOCK_PAGE)
Chris Mason771ed682008-11-06 22:02:51 -05001602 unlock_page(pages[i]);
Chris Masonc8b97812008-10-29 14:49:59 -04001603 page_cache_release(pages[i]);
1604 }
1605 nr_pages -= ret;
1606 index += ret;
1607 cond_resched();
1608 }
1609 return 0;
1610}
Chris Masonc8b97812008-10-29 14:49:59 -04001611
Chris Masond352ac62008-09-29 15:18:18 -04001612/*
1613 * count the number of bytes in the tree that have a given bit(s)
1614 * set. This can be fairly slow, except for EXTENT_DIRTY which is
1615 * cached. The total number found is returned.
1616 */
Chris Masond1310b22008-01-24 16:13:08 -05001617u64 count_range_bits(struct extent_io_tree *tree,
1618 u64 *start, u64 search_end, u64 max_bytes,
Chris Masonec29ed52011-02-23 16:23:20 -05001619 unsigned long bits, int contig)
Chris Masond1310b22008-01-24 16:13:08 -05001620{
1621 struct rb_node *node;
1622 struct extent_state *state;
1623 u64 cur_start = *start;
1624 u64 total_bytes = 0;
Chris Masonec29ed52011-02-23 16:23:20 -05001625 u64 last = 0;
Chris Masond1310b22008-01-24 16:13:08 -05001626 int found = 0;
1627
1628 if (search_end <= cur_start) {
Chris Masond1310b22008-01-24 16:13:08 -05001629 WARN_ON(1);
1630 return 0;
1631 }
1632
Chris Masoncad321a2008-12-17 14:51:42 -05001633 spin_lock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001634 if (cur_start == 0 && bits == EXTENT_DIRTY) {
1635 total_bytes = tree->dirty_bytes;
1636 goto out;
1637 }
1638 /*
1639 * this search will find all the extents that end after
1640 * our range starts.
1641 */
Chris Mason80ea96b2008-02-01 14:51:59 -05001642 node = tree_search(tree, cur_start);
Chris Masond3977122009-01-05 21:25:51 -05001643 if (!node)
Chris Masond1310b22008-01-24 16:13:08 -05001644 goto out;
Chris Masond1310b22008-01-24 16:13:08 -05001645
Chris Masond3977122009-01-05 21:25:51 -05001646 while (1) {
Chris Masond1310b22008-01-24 16:13:08 -05001647 state = rb_entry(node, struct extent_state, rb_node);
1648 if (state->start > search_end)
1649 break;
Chris Masonec29ed52011-02-23 16:23:20 -05001650 if (contig && found && state->start > last + 1)
1651 break;
1652 if (state->end >= cur_start && (state->state & bits) == bits) {
Chris Masond1310b22008-01-24 16:13:08 -05001653 total_bytes += min(search_end, state->end) + 1 -
1654 max(cur_start, state->start);
1655 if (total_bytes >= max_bytes)
1656 break;
1657 if (!found) {
Josef Bacikaf60bed2011-05-04 11:11:17 -04001658 *start = max(cur_start, state->start);
Chris Masond1310b22008-01-24 16:13:08 -05001659 found = 1;
1660 }
Chris Masonec29ed52011-02-23 16:23:20 -05001661 last = state->end;
1662 } else if (contig && found) {
1663 break;
Chris Masond1310b22008-01-24 16:13:08 -05001664 }
1665 node = rb_next(node);
1666 if (!node)
1667 break;
1668 }
1669out:
Chris Masoncad321a2008-12-17 14:51:42 -05001670 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001671 return total_bytes;
1672}
Christoph Hellwigb2950862008-12-02 09:54:17 -05001673
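/*
 * Illustrative sketch (assumed usage, not a function from this file):
 * count contiguous delalloc bytes starting at the first match after
 * offset zero.
 *
 *	u64 start = 0;
 *	u64 bytes = count_range_bits(tree, &start, (u64)-1, (u64)-1,
 *				     EXTENT_DELALLOC, 1);
 *
 * With contig == 1 the walk breaks at the first gap after a match, so
 * 'bytes' covers a single contiguous run beginning at *start; with
 * contig == 0 every matching extent up to max_bytes is summed.
 */
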
Chris Masond352ac62008-09-29 15:18:18 -04001674/*
1675 * set the private field for a given byte offset in the tree. If there isn't
1676 * an extent_state there already, this does nothing.
1677 */
Chris Masond1310b22008-01-24 16:13:08 -05001678int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
1679{
1680 struct rb_node *node;
1681 struct extent_state *state;
1682 int ret = 0;
1683
Chris Masoncad321a2008-12-17 14:51:42 -05001684 spin_lock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001685 /*
1686 * this search will find all the extents that end after
1687 * our range starts.
1688 */
Chris Mason80ea96b2008-02-01 14:51:59 -05001689 node = tree_search(tree, start);
Peter2b114d12008-04-01 11:21:40 -04001690 if (!node) {
Chris Masond1310b22008-01-24 16:13:08 -05001691 ret = -ENOENT;
1692 goto out;
1693 }
1694 state = rb_entry(node, struct extent_state, rb_node);
1695 if (state->start != start) {
1696 ret = -ENOENT;
1697 goto out;
1698 }
1699 state->private = private;
1700out:
Chris Masoncad321a2008-12-17 14:51:42 -05001701 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001702 return ret;
1703}
1704
1705int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
1706{
1707 struct rb_node *node;
1708 struct extent_state *state;
1709 int ret = 0;
1710
Chris Masoncad321a2008-12-17 14:51:42 -05001711 spin_lock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001712 /*
1713 * this search will find all the extents that end after
1714 * our range starts.
1715 */
Chris Mason80ea96b2008-02-01 14:51:59 -05001716 node = tree_search(tree, start);
Peter2b114d12008-04-01 11:21:40 -04001717 if (!node) {
Chris Masond1310b22008-01-24 16:13:08 -05001718 ret = -ENOENT;
1719 goto out;
1720 }
1721 state = rb_entry(node, struct extent_state, rb_node);
1722 if (state->start != start) {
1723 ret = -ENOENT;
1724 goto out;
1725 }
1726 *private = state->private;
1727out:
Chris Masoncad321a2008-12-17 14:51:42 -05001728 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001729 return ret;
1730}
1731
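/*
 * Illustrative sketch (assumed usage, not from the original source):
 * the private field usually carries a pointer cast through u64, as
 * clean_io_failure() below does with its io_failure_record.  Here
 * 'rec' is a hypothetical io_failure_record pointer.
 *
 *	u64 priv;
 *
 *	if (set_state_private(tree, start, (u64)(unsigned long)rec))
 *		... no extent_state starts exactly at 'start' ...
 *	if (!get_state_private(tree, start, &priv))
 *		rec = (struct io_failure_record *)(unsigned long)priv;
 */
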
1732/*
1733 * searches a range in the state tree for a given mask.
Chris Mason70dec802008-01-29 09:59:12 -05001734 * If 'filled' == 1, this returns 1 only if every extent in the range
Chris Masond1310b22008-01-24 16:13:08 -05001735 * has the bits set. Otherwise, 1 is returned if any bit in the
1736 * range is found set.
1737 */
1738int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
Chris Mason9655d292009-09-02 15:22:30 -04001739 int bits, int filled, struct extent_state *cached)
Chris Masond1310b22008-01-24 16:13:08 -05001740{
1741 struct extent_state *state = NULL;
1742 struct rb_node *node;
1743 int bitset = 0;
Chris Masond1310b22008-01-24 16:13:08 -05001744
Chris Masoncad321a2008-12-17 14:51:42 -05001745 spin_lock(&tree->lock);
Josef Bacikdf98b6e2011-06-20 14:53:48 -04001746 if (cached && cached->tree && cached->start <= start &&
1747 cached->end > start)
Chris Mason9655d292009-09-02 15:22:30 -04001748 node = &cached->rb_node;
1749 else
1750 node = tree_search(tree, start);
Chris Masond1310b22008-01-24 16:13:08 -05001751 while (node && start <= end) {
1752 state = rb_entry(node, struct extent_state, rb_node);
1753
1754 if (filled && state->start > start) {
1755 bitset = 0;
1756 break;
1757 }
1758
1759 if (state->start > end)
1760 break;
1761
1762 if (state->state & bits) {
1763 bitset = 1;
1764 if (!filled)
1765 break;
1766 } else if (filled) {
1767 bitset = 0;
1768 break;
1769 }
Chris Mason46562ce2009-09-23 20:23:16 -04001770
1771 if (state->end == (u64)-1)
1772 break;
1773
Chris Masond1310b22008-01-24 16:13:08 -05001774 start = state->end + 1;
1775 if (start > end)
1776 break;
1777 node = rb_next(node);
1778 if (!node) {
1779 if (filled)
1780 bitset = 0;
1781 break;
1782 }
1783 }
Chris Masoncad321a2008-12-17 14:51:42 -05001784 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001785 return bitset;
1786}
Chris Masond1310b22008-01-24 16:13:08 -05001787
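/*
 * Added note (not in the original source): the 'filled' flag selects
 * between two different questions about [start, end]:
 *
 *	filled == 1: is the whole range covered by states carrying
 *	             'bits', with no holes?
 *	filled == 0: does any state overlapping the range have 'bits'
 *	             set?
 *
 * A 'cached' state that still covers 'start' lets the walk skip the
 * rb-tree search entirely.
 */
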
1788/*
1789 * helper function to set a given page up to date if all the
1790 * extents in the tree for that page are up to date
1791 */
Jeff Mahoney143bede2012-03-01 14:56:26 +01001792static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
Chris Masond1310b22008-01-24 16:13:08 -05001793{
1794 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1795 u64 end = start + PAGE_CACHE_SIZE - 1;
Chris Mason9655d292009-09-02 15:22:30 -04001796 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
Chris Masond1310b22008-01-24 16:13:08 -05001797 SetPageUptodate(page);
Chris Masond1310b22008-01-24 16:13:08 -05001798}
1799
1800/*
1801 * helper function to unlock a page if all the extents in the tree
1802 * for that page are unlocked
1803 */
Jeff Mahoney143bede2012-03-01 14:56:26 +01001804static void check_page_locked(struct extent_io_tree *tree, struct page *page)
Chris Masond1310b22008-01-24 16:13:08 -05001805{
1806 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1807 u64 end = start + PAGE_CACHE_SIZE - 1;
Chris Mason9655d292009-09-02 15:22:30 -04001808 if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL))
Chris Masond1310b22008-01-24 16:13:08 -05001809 unlock_page(page);
Chris Masond1310b22008-01-24 16:13:08 -05001810}
1811
1812/*
1813 * helper function to end page writeback if all the extents
1814 * in the tree for that page are done with writeback
1815 */
Jeff Mahoney143bede2012-03-01 14:56:26 +01001816static void check_page_writeback(struct extent_io_tree *tree,
1817 struct page *page)
Chris Masond1310b22008-01-24 16:13:08 -05001818{
Chris Mason1edbb732009-09-02 13:24:36 -04001819 end_page_writeback(page);
Chris Masond1310b22008-01-24 16:13:08 -05001820}
1821
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02001822/*
1823 * When IO fails, either with EIO or csum verification fails, we
1824 * try other mirrors that might have a good copy of the data. This
1825 * io_failure_record is used to record state as we go through all the
1826 * mirrors. If another mirror has good data, the page is set up to date
1827 * and things continue. If a good mirror can't be found, the original
1828 * bio end_io callback is called to indicate things have failed.
1829 */
1830struct io_failure_record {
1831 struct page *page;
1832 u64 start;
1833 u64 len;
1834 u64 logical;
1835 unsigned long bio_flags;
1836 int this_mirror;
1837 int failed_mirror;
1838 int in_validation;
1839};
1840
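/*
 * Added summary (not in the original source): a record is created by
 * bio_readpage_error() on the first failed read of a range, stored via
 * set_state_private() in the io_failure_tree, and freed by
 * free_io_failure() once clean_io_failure() decides the range is either
 * repaired or beyond repair.  in_validation marks the window where a
 * re-read of a supposedly good copy is still in flight.
 */
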
1841static int free_io_failure(struct inode *inode, struct io_failure_record *rec,
1842 int did_repair)
1843{
1844 int ret;
1845 int err = 0;
1846 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
1847
1848 set_state_private(failure_tree, rec->start, 0);
1849 ret = clear_extent_bits(failure_tree, rec->start,
1850 rec->start + rec->len - 1,
1851 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
1852 if (ret)
1853 err = ret;
1854
1855 if (did_repair) {
1856 ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start,
1857 rec->start + rec->len - 1,
1858 EXTENT_DAMAGED, GFP_NOFS);
1859 if (ret && !err)
1860 err = ret;
1861 }
1862
1863 kfree(rec);
1864 return err;
1865}
1866
1867static void repair_io_failure_callback(struct bio *bio, int err)
1868{
1869 complete(bio->bi_private);
1870}
1871
1872/*
1873 * this bypasses the standard btrfs submit functions deliberately, as
1874 * the standard behavior is to write all copies in a raid setup. here we only
1875 * want to write the one bad copy. so we do the mapping for ourselves and issue
1876 * submit_bio directly.
1877 * to avoid any synchronization issues, wait for the data after writing, which
1878 * actually prevents the read that triggered the error from finishing.
1879 * currently, there can be no more than two copies of every data bit. thus,
1880 * exactly one rewrite is required.
1881 */
1882int repair_io_failure(struct btrfs_mapping_tree *map_tree, u64 start,
1883 u64 length, u64 logical, struct page *page,
1884 int mirror_num)
1885{
1886 struct bio *bio;
1887 struct btrfs_device *dev;
1888 DECLARE_COMPLETION_ONSTACK(compl);
1889 u64 map_length = 0;
1890 u64 sector;
1891 struct btrfs_bio *bbio = NULL;
1892 int ret;
1893
1894 BUG_ON(!mirror_num);
1895
1896 bio = bio_alloc(GFP_NOFS, 1);
1897 if (!bio)
1898 return -EIO;
1899 bio->bi_private = &compl;
1900 bio->bi_end_io = repair_io_failure_callback;
1901 bio->bi_size = 0;
1902 map_length = length;
1903
1904 ret = btrfs_map_block(map_tree, WRITE, logical,
1905 &map_length, &bbio, mirror_num);
1906 if (ret) {
1907 bio_put(bio);
1908 return -EIO;
1909 }
1910 BUG_ON(mirror_num != bbio->mirror_num);
1911 sector = bbio->stripes[mirror_num-1].physical >> 9;
1912 bio->bi_sector = sector;
1913 dev = bbio->stripes[mirror_num-1].dev;
1914 kfree(bbio);
1915 if (!dev || !dev->bdev || !dev->writeable) {
1916 bio_put(bio);
1917 return -EIO;
1918 }
1919 bio->bi_bdev = dev->bdev;
1920 bio_add_page(bio, page, length, start-page_offset(page));
Stefan Behrens21adbd52011-11-09 13:44:05 +01001921 btrfsic_submit_bio(WRITE_SYNC, bio);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02001922 wait_for_completion(&compl);
1923
1924 if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
1925 /* try to remap that extent elsewhere? */
1926 bio_put(bio);
1927 return -EIO;
1928 }
1929
1930 printk(KERN_INFO "btrfs read error corrected: ino %lu off %llu (dev %s "
1931 "sector %llu)\n", page->mapping->host->i_ino, start,
1932 dev->name, sector);
1933
1934 bio_put(bio);
1935 return 0;
1936}
1937
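/*
 * Added note (not in the original source): repair_io_failure() is
 * deliberately synchronous (WRITE_SYNC plus wait_for_completion), so
 * the corrected sectors are on disk before the read that tripped the
 * error is allowed to finish; repair_eb_io_failure() below just
 * repeats that for every page backing an extent buffer.
 */
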
Josef Bacikea466792012-03-26 21:57:36 -04001938int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
1939 int mirror_num)
1940{
1941 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
1942 u64 start = eb->start;
1943 unsigned long i, num_pages = num_extent_pages(eb->start, eb->len);
Chris Masond95603b2012-04-12 15:55:15 -04001944 int ret = 0;
Josef Bacikea466792012-03-26 21:57:36 -04001945
1946 for (i = 0; i < num_pages; i++) {
1947 struct page *p = extent_buffer_page(eb, i);
1948 ret = repair_io_failure(map_tree, start, PAGE_CACHE_SIZE,
1949 start, p, mirror_num);
1950 if (ret)
1951 break;
1952 start += PAGE_CACHE_SIZE;
1953 }
1954
1955 return ret;
1956}
1957
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02001958/*
1959 * each time an IO finishes, we do a fast check in the IO failure tree
1960 * to see if we need to process or clean up an io_failure_record
1961 */
1962static int clean_io_failure(u64 start, struct page *page)
1963{
1964 u64 private;
1965 u64 private_failure;
1966 struct io_failure_record *failrec;
1967 struct btrfs_mapping_tree *map_tree;
1968 struct extent_state *state;
1969 int num_copies;
1970 int did_repair = 0;
1971 int ret;
1972 struct inode *inode = page->mapping->host;
1973
1974 private = 0;
1975 ret = count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
1976 (u64)-1, 1, EXTENT_DIRTY, 0);
1977 if (!ret)
1978 return 0;
1979
1980 ret = get_state_private(&BTRFS_I(inode)->io_failure_tree, start,
1981 &private_failure);
1982 if (ret)
1983 return 0;
1984
1985 failrec = (struct io_failure_record *)(unsigned long) private_failure;
1986 BUG_ON(!failrec->this_mirror);
1987
1988 if (failrec->in_validation) {
1989 /* there was no real error, just free the record */
1990 pr_debug("clean_io_failure: freeing dummy error at %llu\n",
1991 failrec->start);
1992 did_repair = 1;
1993 goto out;
1994 }
1995
1996 spin_lock(&BTRFS_I(inode)->io_tree.lock);
1997 state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
1998 failrec->start,
1999 EXTENT_LOCKED);
2000 spin_unlock(&BTRFS_I(inode)->io_tree.lock);
2001
2002 if (state && state->start == failrec->start) {
2003 map_tree = &BTRFS_I(inode)->root->fs_info->mapping_tree;
2004 num_copies = btrfs_num_copies(map_tree, failrec->logical,
2005 failrec->len);
2006 if (num_copies > 1) {
2007 ret = repair_io_failure(map_tree, start, failrec->len,
2008 failrec->logical, page,
2009 failrec->failed_mirror);
2010 did_repair = !ret;
2011 }
2012 }
2013
2014out:
2015 if (!ret)
2016 ret = free_io_failure(inode, failrec, did_repair);
2017
2018 return ret;
2019}
2020
2021/*
2022 * this is a generic handler for readpage errors (default
2023 * readpage_io_failed_hook). if other copies exist, read those and write back
2024 * good data to the failed position. does not attempt to remap the
2025 * failed extent elsewhere, hoping the device will be smart enough to do this as
2026 * needed
2027 */
2028
2029static int bio_readpage_error(struct bio *failed_bio, struct page *page,
2030 u64 start, u64 end, int failed_mirror,
2031 struct extent_state *state)
2032{
2033 struct io_failure_record *failrec = NULL;
2034 u64 private;
2035 struct extent_map *em;
2036 struct inode *inode = page->mapping->host;
2037 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2038 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2039 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
2040 struct bio *bio;
2041 int num_copies;
2042 int ret;
2043 int read_mode;
2044 u64 logical;
2045
2046 BUG_ON(failed_bio->bi_rw & REQ_WRITE);
2047
2048 ret = get_state_private(failure_tree, start, &private);
2049 if (ret) {
2050 failrec = kzalloc(sizeof(*failrec), GFP_NOFS);
2051 if (!failrec)
2052 return -ENOMEM;
2053 failrec->start = start;
2054 failrec->len = end - start + 1;
2055 failrec->this_mirror = 0;
2056 failrec->bio_flags = 0;
2057 failrec->in_validation = 0;
2058
2059 read_lock(&em_tree->lock);
2060 em = lookup_extent_mapping(em_tree, start, failrec->len);
2061 if (!em) {
2062 read_unlock(&em_tree->lock);
2063 kfree(failrec);
2064 return -EIO;
2065 }
2066
2067 if (em->start > start || em->start + em->len < start) {
2068 free_extent_map(em);
2069 em = NULL;
2070 }
2071 read_unlock(&em_tree->lock);
2072
2073 if (!em || IS_ERR(em)) {
2074 kfree(failrec);
2075 return -EIO;
2076 }
2077 logical = start - em->start;
2078 logical = em->block_start + logical;
2079 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2080 logical = em->block_start;
2081 failrec->bio_flags = EXTENT_BIO_COMPRESSED;
2082 extent_set_compress_type(&failrec->bio_flags,
2083 em->compress_type);
2084 }
2085 pr_debug("bio_readpage_error: (new) logical=%llu, start=%llu, "
2086 "len=%llu\n", logical, start, failrec->len);
2087 failrec->logical = logical;
2088 free_extent_map(em);
2089
2090 /* set the bits in the private failure tree */
2091 ret = set_extent_bits(failure_tree, start, end,
2092 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
2093 if (ret >= 0)
2094 ret = set_state_private(failure_tree, start,
2095 (u64)(unsigned long)failrec);
2096 /* set the bits in the inode's tree */
2097 if (ret >= 0)
2098 ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED,
2099 GFP_NOFS);
2100 if (ret < 0) {
2101 kfree(failrec);
2102 return ret;
2103 }
2104 } else {
2105 failrec = (struct io_failure_record *)(unsigned long)private;
2106 pr_debug("bio_readpage_error: (found) logical=%llu, "
2107 "start=%llu, len=%llu, validation=%d\n",
2108 failrec->logical, failrec->start, failrec->len,
2109 failrec->in_validation);
2110 /*
2111 * when data can be on disk more than twice, add to failrec here
2112 * (e.g. with a list for failed_mirror) to make
2113 * clean_io_failure() clean all those errors at once.
2114 */
2115 }
2116 num_copies = btrfs_num_copies(
2117 &BTRFS_I(inode)->root->fs_info->mapping_tree,
2118 failrec->logical, failrec->len);
2119 if (num_copies == 1) {
2120 /*
2121 * we only have a single copy of the data, so don't bother with
2122 * all the retry and error correction code that follows. no
2123 * matter what the error is, it is very likely to persist.
2124 */
2125 pr_debug("bio_readpage_error: cannot repair, num_copies == 1. "
2126 "state=%p, num_copies=%d, next_mirror %d, "
2127 "failed_mirror %d\n", state, num_copies,
2128 failrec->this_mirror, failed_mirror);
2129 free_io_failure(inode, failrec, 0);
2130 return -EIO;
2131 }
2132
2133 if (!state) {
2134 spin_lock(&tree->lock);
2135 state = find_first_extent_bit_state(tree, failrec->start,
2136 EXTENT_LOCKED);
2137 if (state && state->start != failrec->start)
2138 state = NULL;
2139 spin_unlock(&tree->lock);
2140 }
2141
2142 /*
2143 * there are two premises:
2144 * a) deliver good data to the caller
2145 * b) correct the bad sectors on disk
2146 */
2147 if (failed_bio->bi_vcnt > 1) {
2148 /*
2149 * to fulfill b), we need to know the exact failing sectors, as
2150 * we don't want to rewrite any more than the failed ones. thus,
2151 * we need separate read requests for the failed bio
2152 *
2153 * if the following BUG_ON triggers, our validation request got
2154 * merged. we need separate requests for our algorithm to work.
2155 */
2156 BUG_ON(failrec->in_validation);
2157 failrec->in_validation = 1;
2158 failrec->this_mirror = failed_mirror;
2159 read_mode = READ_SYNC | REQ_FAILFAST_DEV;
2160 } else {
2161 /*
2162 * we're ready to fulfill a) and b) at the same time: get a good copy
2163 * of the failed sector and, if we succeed, we have set up
2164 * everything for repair_io_failure to do the rest for us.
2165 */
2166 if (failrec->in_validation) {
2167 BUG_ON(failrec->this_mirror != failed_mirror);
2168 failrec->in_validation = 0;
2169 failrec->this_mirror = 0;
2170 }
2171 failrec->failed_mirror = failed_mirror;
2172 failrec->this_mirror++;
2173 if (failrec->this_mirror == failed_mirror)
2174 failrec->this_mirror++;
2175 read_mode = READ_SYNC;
2176 }
2177
2178 if (!state || failrec->this_mirror > num_copies) {
2179 pr_debug("bio_readpage_error: (fail) state=%p, num_copies=%d, "
2180 "next_mirror %d, failed_mirror %d\n", state,
2181 num_copies, failrec->this_mirror, failed_mirror);
2182 free_io_failure(inode, failrec, 0);
2183 return -EIO;
2184 }
2185
2186 bio = bio_alloc(GFP_NOFS, 1);
Tsutomu Itohe627ee72012-04-12 16:03:56 -04002187 if (!bio) {
2188 free_io_failure(inode, failrec, 0);
2189 return -EIO;
2190 }
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002191 bio->bi_private = state;
2192 bio->bi_end_io = failed_bio->bi_end_io;
2193 bio->bi_sector = failrec->logical >> 9;
2194 bio->bi_bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
2195 bio->bi_size = 0;
2196
2197 bio_add_page(bio, page, failrec->len, start - page_offset(page));
2198
2199 pr_debug("bio_readpage_error: submitting new read[%#x] to "
2200 "this_mirror=%d, num_copies=%d, in_validation=%d\n", read_mode,
2201 failrec->this_mirror, num_copies, failrec->in_validation);
2202
Tsutomu Itoh013bd4c2012-02-16 10:11:40 +09002203 ret = tree->ops->submit_bio_hook(inode, read_mode, bio,
2204 failrec->this_mirror,
2205 failrec->bio_flags, 0);
2206 return ret;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002207}
2208
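/*
 * Illustrative walk-through (added, not in the original source) of the
 * mirror rotation above for num_copies == 2 and failed_mirror == 1:
 * this_mirror starts at 0, is incremented to 1, collides with
 * failed_mirror and is bumped to 2 - the remaining good copy.  If that
 * read fails too, the next pass pushes this_mirror to 3, the
 * this_mirror > num_copies check fires and -EIO is returned.
 */
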
Chris Masond1310b22008-01-24 16:13:08 -05002209/* lots and lots of room for performance fixes in the end_bio funcs */
2210
Jeff Mahoney87826df2012-02-15 16:23:57 +01002211int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
2212{
2213 int uptodate = (err == 0);
2214 struct extent_io_tree *tree;
2215 int ret;
2216
2217 tree = &BTRFS_I(page->mapping->host)->io_tree;
2218
2219 if (tree->ops && tree->ops->writepage_end_io_hook) {
2220 ret = tree->ops->writepage_end_io_hook(page, start,
2221 end, NULL, uptodate);
2222 if (ret)
2223 uptodate = 0;
2224 }
2225
2226 if (!uptodate && tree->ops &&
2227 tree->ops->writepage_io_failed_hook) {
2228 ret = tree->ops->writepage_io_failed_hook(NULL, page,
2229 start, end, NULL);
2230 /* Writeback already completed */
2231 if (ret == 0)
2232 return 1;
2233 }
2234
2235 if (!uptodate) {
2236 clear_extent_uptodate(tree, start, end, NULL, GFP_NOFS);
2237 ClearPageUptodate(page);
2238 SetPageError(page);
2239 }
2240 return 0;
2241}
2242
Chris Masond1310b22008-01-24 16:13:08 -05002243/*
2244 * after a writepage IO is done, we need to:
2245 * clear the uptodate bits on error
2246 * clear the writeback bits in the extent tree for this IO
2247 * end_page_writeback if the page has no more pending IO
2248 *
2249 * Scheduling is not allowed, so the extent state tree is expected
2250 * to have one and only one object corresponding to this IO.
2251 */
Chris Masond1310b22008-01-24 16:13:08 -05002252static void end_bio_extent_writepage(struct bio *bio, int err)
Chris Masond1310b22008-01-24 16:13:08 -05002253{
Chris Masond1310b22008-01-24 16:13:08 -05002254 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
David Woodhouse902b22f2008-08-20 08:51:49 -04002255 struct extent_io_tree *tree;
Chris Masond1310b22008-01-24 16:13:08 -05002256 u64 start;
2257 u64 end;
2258 int whole_page;
2259
Chris Masond1310b22008-01-24 16:13:08 -05002260 do {
2261 struct page *page = bvec->bv_page;
David Woodhouse902b22f2008-08-20 08:51:49 -04002262 tree = &BTRFS_I(page->mapping->host)->io_tree;
2263
Chris Masond1310b22008-01-24 16:13:08 -05002264 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
2265 bvec->bv_offset;
2266 end = start + bvec->bv_len - 1;
2267
2268 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
2269 whole_page = 1;
2270 else
2271 whole_page = 0;
2272
2273 if (--bvec >= bio->bi_io_vec)
2274 prefetchw(&bvec->bv_page->flags);
Chris Mason1259ab72008-05-12 13:39:03 -04002275
Jeff Mahoney87826df2012-02-15 16:23:57 +01002276 if (end_extent_writepage(page, err, start, end))
2277 continue;
Chris Mason70dec802008-01-29 09:59:12 -05002278
Chris Masond1310b22008-01-24 16:13:08 -05002279 if (whole_page)
2280 end_page_writeback(page);
2281 else
2282 check_page_writeback(tree, page);
Chris Masond1310b22008-01-24 16:13:08 -05002283 } while (bvec >= bio->bi_io_vec);
Chris Mason2b1f55b2008-09-24 11:48:04 -04002284
Chris Masond1310b22008-01-24 16:13:08 -05002285 bio_put(bio);
Chris Masond1310b22008-01-24 16:13:08 -05002286}
2287
2288/*
2289 * after a readpage IO is done, we need to:
2290 * clear the uptodate bits on error
2291 * set the uptodate bits if things worked
2292 * set the page up to date if all extents in the tree are uptodate
2293 * clear the lock bit in the extent tree
2294 * unlock the page if there are no other extents locked for it
2295 *
2296 * Scheduling is not allowed, so the extent state tree is expected
2297 * to have one and only one object corresponding to this IO.
2298 */
Chris Masond1310b22008-01-24 16:13:08 -05002299static void end_bio_extent_readpage(struct bio *bio, int err)
Chris Masond1310b22008-01-24 16:13:08 -05002300{
2301 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
Chris Mason4125bf72010-02-03 18:18:45 +00002302 struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
2303 struct bio_vec *bvec = bio->bi_io_vec;
David Woodhouse902b22f2008-08-20 08:51:49 -04002304 struct extent_io_tree *tree;
Chris Masond1310b22008-01-24 16:13:08 -05002305 u64 start;
2306 u64 end;
2307 int whole_page;
Josef Bacik5cf1ab52012-04-16 09:42:26 -04002308 int mirror;
Chris Masond1310b22008-01-24 16:13:08 -05002309 int ret;
2310
Chris Masond20f7042008-12-08 16:58:54 -05002311 if (err)
2312 uptodate = 0;
2313
Chris Masond1310b22008-01-24 16:13:08 -05002314 do {
2315 struct page *page = bvec->bv_page;
Arne Jansen507903b2011-04-06 10:02:20 +00002316 struct extent_state *cached = NULL;
2317 struct extent_state *state;
2318
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002319 pr_debug("end_bio_extent_readpage: bi_vcnt=%d, idx=%d, err=%d, "
2320 "mirror=%ld\n", bio->bi_vcnt, bio->bi_idx, err,
2321 (long int)bio->bi_bdev);
David Woodhouse902b22f2008-08-20 08:51:49 -04002322 tree = &BTRFS_I(page->mapping->host)->io_tree;
2323
Chris Masond1310b22008-01-24 16:13:08 -05002324 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
2325 bvec->bv_offset;
2326 end = start + bvec->bv_len - 1;
2327
2328 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
2329 whole_page = 1;
2330 else
2331 whole_page = 0;
2332
Chris Mason4125bf72010-02-03 18:18:45 +00002333 if (++bvec <= bvec_end)
Chris Masond1310b22008-01-24 16:13:08 -05002334 prefetchw(&bvec->bv_page->flags);
2335
Arne Jansen507903b2011-04-06 10:02:20 +00002336 spin_lock(&tree->lock);
Chris Mason0d399202011-04-16 06:55:39 -04002337 state = find_first_extent_bit_state(tree, start, EXTENT_LOCKED);
Chris Mason109b36a2011-04-12 13:57:39 -04002338 if (state && state->start == start) {
Arne Jansen507903b2011-04-06 10:02:20 +00002339 /*
2340 * take a reference on the state, unlock will drop
2341 * the ref
2342 */
2343 cache_state(state, &cached);
2344 }
2345 spin_unlock(&tree->lock);
2346
Josef Bacik5cf1ab52012-04-16 09:42:26 -04002347 mirror = (int)(unsigned long)bio->bi_bdev;
Chris Masond1310b22008-01-24 16:13:08 -05002348 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
Chris Mason70dec802008-01-29 09:59:12 -05002349 ret = tree->ops->readpage_end_io_hook(page, start, end,
Josef Bacik5cf1ab52012-04-16 09:42:26 -04002350 state, mirror);
Chris Masond1310b22008-01-24 16:13:08 -05002351 if (ret)
2352 uptodate = 0;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002353 else
2354 clean_io_failure(start, page);
Chris Masond1310b22008-01-24 16:13:08 -05002355 }
Josef Bacikea466792012-03-26 21:57:36 -04002356
Josef Bacikea466792012-03-26 21:57:36 -04002357 if (!uptodate && tree->ops && tree->ops->readpage_io_failed_hook) {
Josef Bacik5cf1ab52012-04-16 09:42:26 -04002358 ret = tree->ops->readpage_io_failed_hook(page, mirror);
Josef Bacikea466792012-03-26 21:57:36 -04002359 if (!ret && !err &&
2360 test_bit(BIO_UPTODATE, &bio->bi_flags))
2361 uptodate = 1;
2362 } else if (!uptodate) {
Jan Schmidtf4a8e652011-12-01 09:30:36 -05002363 /*
2364 * The generic bio_readpage_error handles errors the
2365 * following way: If possible, new read requests are
2366 * created and submitted and will end up in
2367 * end_bio_extent_readpage as well (if we're lucky, not
2368 * in the !uptodate case). In that case it returns 0 and
2369 * we just go on with the next page in our bio. If it
2370 * can't handle the error it will return -EIO and we
2371 * remain responsible for that page.
2372 */
Josef Bacik5cf1ab52012-04-16 09:42:26 -04002373 ret = bio_readpage_error(bio, page, start, end, mirror, NULL);
Chris Mason7e383262008-04-09 16:28:12 -04002374 if (ret == 0) {
Chris Mason3b951512008-04-17 11:29:12 -04002375 uptodate =
2376 test_bit(BIO_UPTODATE, &bio->bi_flags);
Chris Masond20f7042008-12-08 16:58:54 -05002377 if (err)
2378 uptodate = 0;
Arne Jansen507903b2011-04-06 10:02:20 +00002379 uncache_state(&cached);
Chris Mason7e383262008-04-09 16:28:12 -04002380 continue;
2381 }
2382 }
Chris Mason70dec802008-01-29 09:59:12 -05002383
Josef Bacik0b32f4b2012-03-13 09:38:00 -04002384 if (uptodate && tree->track_uptodate) {
Arne Jansen507903b2011-04-06 10:02:20 +00002385 set_extent_uptodate(tree, start, end, &cached,
David Woodhouse902b22f2008-08-20 08:51:49 -04002386 GFP_ATOMIC);
Chris Mason771ed682008-11-06 22:02:51 -05002387 }
Arne Jansen507903b2011-04-06 10:02:20 +00002388 unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
Chris Masond1310b22008-01-24 16:13:08 -05002389
Chris Mason70dec802008-01-29 09:59:12 -05002390 if (whole_page) {
2391 if (uptodate) {
2392 SetPageUptodate(page);
2393 } else {
2394 ClearPageUptodate(page);
2395 SetPageError(page);
2396 }
Chris Masond1310b22008-01-24 16:13:08 -05002397 unlock_page(page);
Chris Mason70dec802008-01-29 09:59:12 -05002398 } else {
2399 if (uptodate) {
2400 check_page_uptodate(tree, page);
2401 } else {
2402 ClearPageUptodate(page);
2403 SetPageError(page);
2404 }
Chris Masond1310b22008-01-24 16:13:08 -05002405 check_page_locked(tree, page);
Chris Mason70dec802008-01-29 09:59:12 -05002406 }
Chris Mason4125bf72010-02-03 18:18:45 +00002407 } while (bvec <= bvec_end);
Chris Masond1310b22008-01-24 16:13:08 -05002408
2409 bio_put(bio);
Chris Masond1310b22008-01-24 16:13:08 -05002410}
2411
Miao Xie88f794e2010-11-22 03:02:55 +00002412struct bio *
2413btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
2414 gfp_t gfp_flags)
Chris Masond1310b22008-01-24 16:13:08 -05002415{
2416 struct bio *bio;
2417
2418 bio = bio_alloc(gfp_flags, nr_vecs);
2419
2420 if (bio == NULL && (current->flags & PF_MEMALLOC)) {
2421 while (!bio && (nr_vecs /= 2))
2422 bio = bio_alloc(gfp_flags, nr_vecs);
2423 }
2424
2425 if (bio) {
Chris Masone1c4b742008-04-22 13:26:46 -04002426 bio->bi_size = 0;
Chris Masond1310b22008-01-24 16:13:08 -05002427 bio->bi_bdev = bdev;
2428 bio->bi_sector = first_sector;
2429 }
2430 return bio;
2431}
2432
Jeff Mahoney79787ea2012-03-12 16:03:00 +01002433/*
2434 * Since writes are async, they will only return -ENOMEM.
2435 * Reads can return the full range of I/O error conditions.
2436 */
Jeff Mahoney355808c2011-10-03 23:23:14 -04002437static int __must_check submit_one_bio(int rw, struct bio *bio,
2438 int mirror_num, unsigned long bio_flags)
Chris Masond1310b22008-01-24 16:13:08 -05002439{
Chris Masond1310b22008-01-24 16:13:08 -05002440 int ret = 0;
Chris Mason70dec802008-01-29 09:59:12 -05002441 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
2442 struct page *page = bvec->bv_page;
2443 struct extent_io_tree *tree = bio->bi_private;
Chris Mason70dec802008-01-29 09:59:12 -05002444 u64 start;
Chris Mason70dec802008-01-29 09:59:12 -05002445
2446 start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
Chris Mason70dec802008-01-29 09:59:12 -05002447
David Woodhouse902b22f2008-08-20 08:51:49 -04002448 bio->bi_private = NULL;
Chris Masond1310b22008-01-24 16:13:08 -05002449
2450 bio_get(bio);
2451
Chris Mason065631f2008-02-20 12:07:25 -05002452 if (tree->ops && tree->ops->submit_bio_hook)
liubo6b82ce82011-01-26 06:21:39 +00002453 ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
Chris Masoneaf25d92010-05-25 09:48:28 -04002454 mirror_num, bio_flags, start);
Chris Mason0b86a832008-03-24 15:01:56 -04002455 else
Stefan Behrens21adbd52011-11-09 13:44:05 +01002456 btrfsic_submit_bio(rw, bio);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002457
Chris Masond1310b22008-01-24 16:13:08 -05002458 if (bio_flagged(bio, BIO_EOPNOTSUPP))
2459 ret = -EOPNOTSUPP;
2460 bio_put(bio);
2461 return ret;
2462}
2463
Jeff Mahoney3444a972011-10-03 23:23:13 -04002464static int merge_bio(struct extent_io_tree *tree, struct page *page,
2465 unsigned long offset, size_t size, struct bio *bio,
2466 unsigned long bio_flags)
2467{
2468 int ret = 0;
2469 if (tree->ops && tree->ops->merge_bio_hook)
2470 ret = tree->ops->merge_bio_hook(page, offset, size, bio,
2471 bio_flags);
2472 BUG_ON(ret < 0);
2473 return ret;
2474
2475}
2476
Chris Masond1310b22008-01-24 16:13:08 -05002477static int submit_extent_page(int rw, struct extent_io_tree *tree,
2478 struct page *page, sector_t sector,
2479 size_t size, unsigned long offset,
2480 struct block_device *bdev,
2481 struct bio **bio_ret,
2482 unsigned long max_pages,
Chris Masonf1885912008-04-09 16:28:12 -04002483 bio_end_io_t end_io_func,
Chris Masonc8b97812008-10-29 14:49:59 -04002484 int mirror_num,
2485 unsigned long prev_bio_flags,
2486 unsigned long bio_flags)
Chris Masond1310b22008-01-24 16:13:08 -05002487{
2488 int ret = 0;
2489 struct bio *bio;
2490 int nr;
Chris Masonc8b97812008-10-29 14:49:59 -04002491 int contig = 0;
2492 int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED;
2493 int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
Chris Mason5b050f02008-11-11 09:34:41 -05002494 size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE);
Chris Masond1310b22008-01-24 16:13:08 -05002495
2496 if (bio_ret && *bio_ret) {
2497 bio = *bio_ret;
Chris Masonc8b97812008-10-29 14:49:59 -04002498 if (old_compressed)
2499 contig = bio->bi_sector == sector;
2500 else
2501 contig = bio->bi_sector + (bio->bi_size >> 9) ==
2502 sector;
2503
2504 if (prev_bio_flags != bio_flags || !contig ||
Jeff Mahoney3444a972011-10-03 23:23:13 -04002505 merge_bio(tree, page, offset, page_size, bio, bio_flags) ||
Chris Masonc8b97812008-10-29 14:49:59 -04002506 bio_add_page(bio, page, page_size, offset) < page_size) {
2507 ret = submit_one_bio(rw, bio, mirror_num,
2508 prev_bio_flags);
Jeff Mahoney79787ea2012-03-12 16:03:00 +01002509 if (ret < 0)
2510 return ret;
Chris Masond1310b22008-01-24 16:13:08 -05002511 bio = NULL;
2512 } else {
2513 return 0;
2514 }
2515 }
Chris Masonc8b97812008-10-29 14:49:59 -04002516 if (this_compressed)
2517 nr = BIO_MAX_PAGES;
2518 else
2519 nr = bio_get_nr_vecs(bdev);
2520
Miao Xie88f794e2010-11-22 03:02:55 +00002521 bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
Tsutomu Itoh5df67082011-02-01 09:17:35 +00002522 if (!bio)
2523 return -ENOMEM;
Chris Mason70dec802008-01-29 09:59:12 -05002524
Chris Masonc8b97812008-10-29 14:49:59 -04002525 bio_add_page(bio, page, page_size, offset);
Chris Masond1310b22008-01-24 16:13:08 -05002526 bio->bi_end_io = end_io_func;
2527 bio->bi_private = tree;
Chris Mason70dec802008-01-29 09:59:12 -05002528
Chris Masond3977122009-01-05 21:25:51 -05002529 if (bio_ret)
Chris Masond1310b22008-01-24 16:13:08 -05002530 *bio_ret = bio;
Chris Masond3977122009-01-05 21:25:51 -05002531 else
Chris Masonc8b97812008-10-29 14:49:59 -04002532 ret = submit_one_bio(rw, bio, mirror_num, bio_flags);
Chris Masond1310b22008-01-24 16:13:08 -05002533
2534 return ret;
2535}
2536
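/*
 * Added note (not in the original source): submit_extent_page() only
 * starts a new bio when it must - the current one is submitted first if
 * the bio flags changed, the new page is not contiguous with the bio's
 * tail sector, the merge_bio hook refuses the page, or bio_add_page()
 * cannot take the full page.  Otherwise the page is queued in *bio_ret
 * and actual submission is deferred to the caller.
 */
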
Josef Bacik4f2de97a2012-03-07 16:20:05 -05002537void attach_extent_buffer_page(struct extent_buffer *eb, struct page *page)
2538{
2539 if (!PagePrivate(page)) {
2540 SetPagePrivate(page);
2541 page_cache_get(page);
2542 set_page_private(page, (unsigned long)eb);
2543 } else {
2544 WARN_ON(page->private != (unsigned long)eb);
2545 }
2546}
2547
Chris Masond1310b22008-01-24 16:13:08 -05002548void set_page_extent_mapped(struct page *page)
2549{
2550 if (!PagePrivate(page)) {
2551 SetPagePrivate(page);
Chris Masond1310b22008-01-24 16:13:08 -05002552 page_cache_get(page);
Chris Mason6af118c2008-07-22 11:18:07 -04002553 set_page_private(page, EXTENT_PAGE_PRIVATE);
Chris Masond1310b22008-01-24 16:13:08 -05002554 }
2555}
2556
Chris Masond1310b22008-01-24 16:13:08 -05002557/*
2558 * basic readpage implementation. Locked extent state structs are inserted
2559 * into the tree and are removed when the IO is done (by the end_io
2560 * handlers)
Jeff Mahoney79787ea2012-03-12 16:03:00 +01002561 * XXX JDM: This needs looking at to ensure proper page locking
Chris Masond1310b22008-01-24 16:13:08 -05002562 */
2563static int __extent_read_full_page(struct extent_io_tree *tree,
2564 struct page *page,
2565 get_extent_t *get_extent,
Chris Masonc8b97812008-10-29 14:49:59 -04002566 struct bio **bio, int mirror_num,
2567 unsigned long *bio_flags)
Chris Masond1310b22008-01-24 16:13:08 -05002568{
2569 struct inode *inode = page->mapping->host;
2570 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2571 u64 page_end = start + PAGE_CACHE_SIZE - 1;
2572 u64 end;
2573 u64 cur = start;
2574 u64 extent_offset;
2575 u64 last_byte = i_size_read(inode);
2576 u64 block_start;
2577 u64 cur_end;
2578 sector_t sector;
2579 struct extent_map *em;
2580 struct block_device *bdev;
Josef Bacik11c65dc2010-05-23 11:07:21 -04002581 struct btrfs_ordered_extent *ordered;
Chris Masond1310b22008-01-24 16:13:08 -05002582 int ret;
2583 int nr = 0;
David Sterba306e16c2011-04-19 14:29:38 +02002584 size_t pg_offset = 0;
Chris Masond1310b22008-01-24 16:13:08 -05002585 size_t iosize;
Chris Masonc8b97812008-10-29 14:49:59 -04002586 size_t disk_io_size;
Chris Masond1310b22008-01-24 16:13:08 -05002587 size_t blocksize = inode->i_sb->s_blocksize;
Chris Masonc8b97812008-10-29 14:49:59 -04002588 unsigned long this_bio_flag = 0;
Chris Masond1310b22008-01-24 16:13:08 -05002589
2590 set_page_extent_mapped(page);
2591
Dan Magenheimer90a887c2011-05-26 10:01:56 -06002592 if (!PageUptodate(page)) {
2593 if (cleancache_get_page(page) == 0) {
2594 BUG_ON(blocksize != PAGE_SIZE);
2595 goto out;
2596 }
2597 }
2598
Chris Masond1310b22008-01-24 16:13:08 -05002599 end = page_end;
Josef Bacik11c65dc2010-05-23 11:07:21 -04002600 while (1) {
Jeff Mahoneyd0082372012-03-01 14:57:19 +01002601 lock_extent(tree, start, end);
Josef Bacik11c65dc2010-05-23 11:07:21 -04002602 ordered = btrfs_lookup_ordered_extent(inode, start);
2603 if (!ordered)
2604 break;
Jeff Mahoneyd0082372012-03-01 14:57:19 +01002605 unlock_extent(tree, start, end);
Josef Bacik11c65dc2010-05-23 11:07:21 -04002606 btrfs_start_ordered_extent(inode, ordered, 1);
2607 btrfs_put_ordered_extent(ordered);
2608 }
Chris Masond1310b22008-01-24 16:13:08 -05002609
Chris Masonc8b97812008-10-29 14:49:59 -04002610 if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
2611 char *userpage;
2612 size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);
2613
2614 if (zero_offset) {
2615 iosize = PAGE_CACHE_SIZE - zero_offset;
Cong Wang7ac687d2011-11-25 23:14:28 +08002616 userpage = kmap_atomic(page);
Chris Masonc8b97812008-10-29 14:49:59 -04002617 memset(userpage + zero_offset, 0, iosize);
2618 flush_dcache_page(page);
Cong Wang7ac687d2011-11-25 23:14:28 +08002619 kunmap_atomic(userpage);
Chris Masonc8b97812008-10-29 14:49:59 -04002620 }
2621 }
Chris Masond1310b22008-01-24 16:13:08 -05002622 while (cur <= end) {
2623 if (cur >= last_byte) {
2624 char *userpage;
Arne Jansen507903b2011-04-06 10:02:20 +00002625 struct extent_state *cached = NULL;
2626
David Sterba306e16c2011-04-19 14:29:38 +02002627 iosize = PAGE_CACHE_SIZE - pg_offset;
Cong Wang7ac687d2011-11-25 23:14:28 +08002628 userpage = kmap_atomic(page);
David Sterba306e16c2011-04-19 14:29:38 +02002629 memset(userpage + pg_offset, 0, iosize);
Chris Masond1310b22008-01-24 16:13:08 -05002630 flush_dcache_page(page);
Cong Wang7ac687d2011-11-25 23:14:28 +08002631 kunmap_atomic(userpage);
Chris Masond1310b22008-01-24 16:13:08 -05002632 set_extent_uptodate(tree, cur, cur + iosize - 1,
Arne Jansen507903b2011-04-06 10:02:20 +00002633 &cached, GFP_NOFS);
2634 unlock_extent_cached(tree, cur, cur + iosize - 1,
2635 &cached, GFP_NOFS);
Chris Masond1310b22008-01-24 16:13:08 -05002636 break;
2637 }
David Sterba306e16c2011-04-19 14:29:38 +02002638 em = get_extent(inode, page, pg_offset, cur,
Chris Masond1310b22008-01-24 16:13:08 -05002639 end - cur + 1, 0);
David Sterbac7040052011-04-19 18:00:01 +02002640 if (IS_ERR_OR_NULL(em)) {
Chris Masond1310b22008-01-24 16:13:08 -05002641 SetPageError(page);
Jeff Mahoneyd0082372012-03-01 14:57:19 +01002642 unlock_extent(tree, cur, end);
Chris Masond1310b22008-01-24 16:13:08 -05002643 break;
2644 }
Chris Masond1310b22008-01-24 16:13:08 -05002645 extent_offset = cur - em->start;
2646 BUG_ON(extent_map_end(em) <= cur);
2647 BUG_ON(end < cur);
2648
Li Zefan261507a02010-12-17 14:21:50 +08002649 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
Chris Masonc8b97812008-10-29 14:49:59 -04002650 this_bio_flag = EXTENT_BIO_COMPRESSED;
Li Zefan261507a02010-12-17 14:21:50 +08002651 extent_set_compress_type(&this_bio_flag,
2652 em->compress_type);
2653 }
Chris Masonc8b97812008-10-29 14:49:59 -04002654
Chris Masond1310b22008-01-24 16:13:08 -05002655 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2656 cur_end = min(extent_map_end(em) - 1, end);
2657 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
Chris Masonc8b97812008-10-29 14:49:59 -04002658 if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
2659 disk_io_size = em->block_len;
2660 sector = em->block_start >> 9;
2661 } else {
2662 sector = (em->block_start + extent_offset) >> 9;
2663 disk_io_size = iosize;
2664 }
Chris Masond1310b22008-01-24 16:13:08 -05002665 bdev = em->bdev;
2666 block_start = em->block_start;
Yan Zhengd899e052008-10-30 14:25:28 -04002667 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
2668 block_start = EXTENT_MAP_HOLE;
Chris Masond1310b22008-01-24 16:13:08 -05002669 free_extent_map(em);
2670 em = NULL;
2671
2672 /* we've found a hole, just zero and go on */
2673 if (block_start == EXTENT_MAP_HOLE) {
2674 char *userpage;
Arne Jansen507903b2011-04-06 10:02:20 +00002675 struct extent_state *cached = NULL;
2676
Cong Wang7ac687d2011-11-25 23:14:28 +08002677 userpage = kmap_atomic(page);
David Sterba306e16c2011-04-19 14:29:38 +02002678 memset(userpage + pg_offset, 0, iosize);
Chris Masond1310b22008-01-24 16:13:08 -05002679 flush_dcache_page(page);
Cong Wang7ac687d2011-11-25 23:14:28 +08002680 kunmap_atomic(userpage);
Chris Masond1310b22008-01-24 16:13:08 -05002681
2682 set_extent_uptodate(tree, cur, cur + iosize - 1,
Arne Jansen507903b2011-04-06 10:02:20 +00002683 &cached, GFP_NOFS);
2684 unlock_extent_cached(tree, cur, cur + iosize - 1,
2685 &cached, GFP_NOFS);
Chris Masond1310b22008-01-24 16:13:08 -05002686 cur = cur + iosize;
David Sterba306e16c2011-04-19 14:29:38 +02002687 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05002688 continue;
2689 }
2690 /* the get_extent function already copied into the page */
Chris Mason9655d292009-09-02 15:22:30 -04002691 if (test_range_bit(tree, cur, cur_end,
2692 EXTENT_UPTODATE, 1, NULL)) {
Chris Masona1b32a52008-09-05 16:09:51 -04002693 check_page_uptodate(tree, page);
Jeff Mahoneyd0082372012-03-01 14:57:19 +01002694 unlock_extent(tree, cur, cur + iosize - 1);
Chris Masond1310b22008-01-24 16:13:08 -05002695 cur = cur + iosize;
David Sterba306e16c2011-04-19 14:29:38 +02002696 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05002697 continue;
2698 }
Chris Mason70dec802008-01-29 09:59:12 -05002699 /* we have an inline extent but it didn't get marked up
2700 * to date. Error out
2701 */
2702 if (block_start == EXTENT_MAP_INLINE) {
2703 SetPageError(page);
Jeff Mahoneyd0082372012-03-01 14:57:19 +01002704 unlock_extent(tree, cur, cur + iosize - 1);
Chris Mason70dec802008-01-29 09:59:12 -05002705 cur = cur + iosize;
David Sterba306e16c2011-04-19 14:29:38 +02002706 pg_offset += iosize;
Chris Mason70dec802008-01-29 09:59:12 -05002707 continue;
2708 }
Chris Masond1310b22008-01-24 16:13:08 -05002709
2710 ret = 0;
2711 if (tree->ops && tree->ops->readpage_io_hook) {
2712 ret = tree->ops->readpage_io_hook(page, cur,
2713 cur + iosize - 1);
2714 }
2715 if (!ret) {
Chris Mason89642222008-07-24 09:41:53 -04002716 unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
2717 pnr -= page->index;
Chris Masond1310b22008-01-24 16:13:08 -05002718 ret = submit_extent_page(READ, tree, page,
David Sterba306e16c2011-04-19 14:29:38 +02002719 sector, disk_io_size, pg_offset,
Chris Mason89642222008-07-24 09:41:53 -04002720 bdev, bio, pnr,
Chris Masonc8b97812008-10-29 14:49:59 -04002721 end_bio_extent_readpage, mirror_num,
2722 *bio_flags,
2723 this_bio_flag);
Jeff Mahoney79787ea2012-03-12 16:03:00 +01002724 BUG_ON(ret == -ENOMEM);
Chris Mason89642222008-07-24 09:41:53 -04002725 nr++;
Chris Masonc8b97812008-10-29 14:49:59 -04002726 *bio_flags = this_bio_flag;
Chris Masond1310b22008-01-24 16:13:08 -05002727 }
2728 if (ret)
2729 SetPageError(page);
2730 cur = cur + iosize;
David Sterba306e16c2011-04-19 14:29:38 +02002731 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05002732 }
Dan Magenheimer90a887c2011-05-26 10:01:56 -06002733out:
Chris Masond1310b22008-01-24 16:13:08 -05002734 if (!nr) {
2735 if (!PageError(page))
2736 SetPageUptodate(page);
2737 unlock_page(page);
2738 }
2739 return 0;
2740}
2741
2742int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
Jan Schmidt8ddc7d92011-06-13 20:02:58 +02002743 get_extent_t *get_extent, int mirror_num)
Chris Masond1310b22008-01-24 16:13:08 -05002744{
2745 struct bio *bio = NULL;
Chris Masonc8b97812008-10-29 14:49:59 -04002746 unsigned long bio_flags = 0;
Chris Masond1310b22008-01-24 16:13:08 -05002747 int ret;
2748
Jan Schmidt8ddc7d92011-06-13 20:02:58 +02002749 ret = __extent_read_full_page(tree, page, get_extent, &bio, mirror_num,
Chris Masonc8b97812008-10-29 14:49:59 -04002750 &bio_flags);
Chris Masond1310b22008-01-24 16:13:08 -05002751 if (bio)
Jan Schmidt8ddc7d92011-06-13 20:02:58 +02002752 ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
Chris Masond1310b22008-01-24 16:13:08 -05002753 return ret;
2754}
Chris Masond1310b22008-01-24 16:13:08 -05002755
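/*
 * helper to account for pages that have just been written out: charge
 * them against wbc->nr_to_write and, for cyclic or whole-file
 * writeback, advance the mapping's writeback_index so the next pass
 * resumes after this page
 */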
Chris Mason11c83492009-04-20 15:50:09 -04002756static noinline void update_nr_written(struct page *page,
2757 struct writeback_control *wbc,
2758 unsigned long nr_written)
2759{
2760 wbc->nr_to_write -= nr_written;
2761 if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
2762 wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
2763 page->mapping->writeback_index = page->index + nr_written;
2764}
2765
Chris Masond1310b22008-01-24 16:13:08 -05002766/*
2767 * the writepage semantics are similar to regular writepage. extent
2768 * records are inserted to lock ranges in the tree, and as dirty areas
2769 * are found, they are marked writeback. Then the lock bits are removed
2770 * and the end_io handler clears the writeback ranges
2771 */
2772static int __extent_writepage(struct page *page, struct writeback_control *wbc,
2773 void *data)
2774{
2775 struct inode *inode = page->mapping->host;
2776 struct extent_page_data *epd = data;
2777 struct extent_io_tree *tree = epd->tree;
2778 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2779 u64 delalloc_start;
2780 u64 page_end = start + PAGE_CACHE_SIZE - 1;
2781 u64 end;
2782 u64 cur = start;
2783 u64 extent_offset;
2784 u64 last_byte = i_size_read(inode);
2785 u64 block_start;
2786 u64 iosize;
2787 sector_t sector;
Chris Mason2c64c532009-09-02 15:04:12 -04002788 struct extent_state *cached_state = NULL;
Chris Masond1310b22008-01-24 16:13:08 -05002789 struct extent_map *em;
2790 struct block_device *bdev;
2791 int ret;
2792 int nr = 0;
Chris Mason7f3c74f2008-07-18 12:01:11 -04002793 size_t pg_offset = 0;
Chris Masond1310b22008-01-24 16:13:08 -05002794 size_t blocksize;
2795 loff_t i_size = i_size_read(inode);
2796 unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
2797 u64 nr_delalloc;
2798 u64 delalloc_end;
Chris Masonc8b97812008-10-29 14:49:59 -04002799 int page_started;
2800 int compressed;
Chris Masonffbd5172009-04-20 15:50:09 -04002801 int write_flags;
Chris Mason771ed682008-11-06 22:02:51 -05002802 unsigned long nr_written = 0;
Josef Bacik9e487102011-08-01 12:08:18 -04002803 bool fill_delalloc = true;
Chris Masond1310b22008-01-24 16:13:08 -05002804
Chris Masonffbd5172009-04-20 15:50:09 -04002805 if (wbc->sync_mode == WB_SYNC_ALL)
Jens Axboe721a9602011-03-09 11:56:30 +01002806 write_flags = WRITE_SYNC;
Chris Masonffbd5172009-04-20 15:50:09 -04002807 else
2808 write_flags = WRITE;
2809
liubo1abe9b82011-03-24 11:18:59 +00002810 trace___extent_writepage(page, inode, wbc);
2811
Chris Masond1310b22008-01-24 16:13:08 -05002812 WARN_ON(!PageLocked(page));
Chris Masonbf0da8c2011-11-04 12:29:37 -04002813
2814 ClearPageError(page);
2815
Chris Mason7f3c74f2008-07-18 12:01:11 -04002816 pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
Chris Mason211c17f2008-05-15 09:13:45 -04002817 if (page->index > end_index ||
Chris Mason7f3c74f2008-07-18 12:01:11 -04002818 (page->index == end_index && !pg_offset)) {
Chris Mason39be25c2008-11-10 11:50:50 -05002819 page->mapping->a_ops->invalidatepage(page, 0);
Chris Masond1310b22008-01-24 16:13:08 -05002820 unlock_page(page);
2821 return 0;
2822 }
2823
2824 if (page->index == end_index) {
2825 char *userpage;
2826
Cong Wang7ac687d2011-11-25 23:14:28 +08002827 userpage = kmap_atomic(page);
Chris Mason7f3c74f2008-07-18 12:01:11 -04002828 memset(userpage + pg_offset, 0,
2829 PAGE_CACHE_SIZE - pg_offset);
Cong Wang7ac687d2011-11-25 23:14:28 +08002830 kunmap_atomic(userpage);
Chris Mason211c17f2008-05-15 09:13:45 -04002831 flush_dcache_page(page);
Chris Masond1310b22008-01-24 16:13:08 -05002832 }
Chris Mason7f3c74f2008-07-18 12:01:11 -04002833 pg_offset = 0;
Chris Masond1310b22008-01-24 16:13:08 -05002834
2835 set_page_extent_mapped(page);
2836
Josef Bacik9e487102011-08-01 12:08:18 -04002837 if (!tree->ops || !tree->ops->fill_delalloc)
2838 fill_delalloc = false;
2839
Chris Masond1310b22008-01-24 16:13:08 -05002840 delalloc_start = start;
2841 delalloc_end = 0;
Chris Masonc8b97812008-10-29 14:49:59 -04002842 page_started = 0;
Josef Bacik9e487102011-08-01 12:08:18 -04002843 if (!epd->extent_locked && fill_delalloc) {
Chris Masonf85d7d6c2009-09-18 16:03:16 -04002844 u64 delalloc_to_write = 0;
Chris Mason11c83492009-04-20 15:50:09 -04002845 /*
2846 * make sure the wbc mapping index is at least updated
2847 * to this page.
2848 */
2849 update_nr_written(page, wbc, 0);
2850
Chris Masond3977122009-01-05 21:25:51 -05002851 while (delalloc_end < page_end) {
Chris Mason771ed682008-11-06 22:02:51 -05002852 nr_delalloc = find_lock_delalloc_range(inode, tree,
Chris Masonc8b97812008-10-29 14:49:59 -04002853 page,
2854 &delalloc_start,
Chris Masond1310b22008-01-24 16:13:08 -05002855 &delalloc_end,
2856 128 * 1024 * 1024);
Chris Mason771ed682008-11-06 22:02:51 -05002857 if (nr_delalloc == 0) {
2858 delalloc_start = delalloc_end + 1;
2859 continue;
2860 }
Tsutomu Itoh013bd4c2012-02-16 10:11:40 +09002861 ret = tree->ops->fill_delalloc(inode, page,
2862 delalloc_start,
2863 delalloc_end,
2864 &page_started,
2865 &nr_written);
Jeff Mahoney79787ea2012-03-12 16:03:00 +01002866 /* File system has been set read-only */
2867 if (ret) {
2868 SetPageError(page);
2869 goto done;
2870 }
Chris Masonf85d7d6c2009-09-18 16:03:16 -04002871 /*
2872 * delalloc_end is already one less than the total
2873 * length, so we don't subtract one from
2874 * PAGE_CACHE_SIZE
2875 */
2876 delalloc_to_write += (delalloc_end - delalloc_start +
2877 PAGE_CACHE_SIZE) >>
2878 PAGE_CACHE_SHIFT;
Chris Masond1310b22008-01-24 16:13:08 -05002879 delalloc_start = delalloc_end + 1;
Chris Masond1310b22008-01-24 16:13:08 -05002880 }
Chris Masonf85d7d6c2009-09-18 16:03:16 -04002881 if (wbc->nr_to_write < delalloc_to_write) {
2882 int thresh = 8192;
2883
2884 if (delalloc_to_write < thresh * 2)
2885 thresh = delalloc_to_write;
2886 wbc->nr_to_write = min_t(u64, delalloc_to_write,
2887 thresh);
2888 }
Chris Masonc8b97812008-10-29 14:49:59 -04002889
Chris Mason771ed682008-11-06 22:02:51 -05002890 /* did the fill delalloc function already unlock and start
2891 * the IO?
2892 */
2893 if (page_started) {
2894 ret = 0;
Chris Mason11c83492009-04-20 15:50:09 -04002895 /*
2896 * we've unlocked the page, so we can't update
2897 * the mapping's writeback index, just update
2898 * nr_to_write.
2899 */
2900 wbc->nr_to_write -= nr_written;
2901 goto done_unlocked;
Chris Mason771ed682008-11-06 22:02:51 -05002902 }
Chris Masonc8b97812008-10-29 14:49:59 -04002903 }
Chris Mason247e7432008-07-17 12:53:51 -04002904 if (tree->ops && tree->ops->writepage_start_hook) {
Chris Masonc8b97812008-10-29 14:49:59 -04002905 ret = tree->ops->writepage_start_hook(page, start,
2906 page_end);
Jeff Mahoney87826df2012-02-15 16:23:57 +01002907 if (ret) {
2908 /* Fixup worker will requeue */
2909 if (ret == -EBUSY)
2910 wbc->pages_skipped++;
2911 else
2912 redirty_page_for_writepage(wbc, page);
Chris Mason11c83492009-04-20 15:50:09 -04002913 update_nr_written(page, wbc, nr_written);
Chris Mason247e7432008-07-17 12:53:51 -04002914 unlock_page(page);
Chris Mason771ed682008-11-06 22:02:51 -05002915 ret = 0;
Chris Mason11c83492009-04-20 15:50:09 -04002916 goto done_unlocked;
Chris Mason247e7432008-07-17 12:53:51 -04002917 }
2918 }
2919
Chris Mason11c83492009-04-20 15:50:09 -04002920 /*
2921 * we don't want to touch the inode after unlocking the page,
2922 * so we update the mapping writeback index now
2923 */
2924 update_nr_written(page, wbc, nr_written + 1);
Chris Mason771ed682008-11-06 22:02:51 -05002925
Chris Masond1310b22008-01-24 16:13:08 -05002926 end = page_end;
Chris Masond1310b22008-01-24 16:13:08 -05002927 if (last_byte <= start) {
Chris Masone6dcd2d2008-07-17 12:53:50 -04002928 if (tree->ops && tree->ops->writepage_end_io_hook)
2929 tree->ops->writepage_end_io_hook(page, start,
2930 page_end, NULL, 1);
Chris Masond1310b22008-01-24 16:13:08 -05002931 goto done;
2932 }
2933
Chris Masond1310b22008-01-24 16:13:08 -05002934 blocksize = inode->i_sb->s_blocksize;
2935
2936 while (cur <= end) {
2937 if (cur >= last_byte) {
Chris Masone6dcd2d2008-07-17 12:53:50 -04002938 if (tree->ops && tree->ops->writepage_end_io_hook)
2939 tree->ops->writepage_end_io_hook(page, cur,
2940 page_end, NULL, 1);
Chris Masond1310b22008-01-24 16:13:08 -05002941 break;
2942 }
Chris Mason7f3c74f2008-07-18 12:01:11 -04002943 em = epd->get_extent(inode, page, pg_offset, cur,
Chris Masond1310b22008-01-24 16:13:08 -05002944 end - cur + 1, 1);
David Sterbac7040052011-04-19 18:00:01 +02002945 if (IS_ERR_OR_NULL(em)) {
Chris Masond1310b22008-01-24 16:13:08 -05002946 SetPageError(page);
2947 break;
2948 }
2949
2950 extent_offset = cur - em->start;
2951 BUG_ON(extent_map_end(em) <= cur);
2952 BUG_ON(end < cur);
2953 iosize = min(extent_map_end(em) - cur, end - cur + 1);
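		/*
		 * round the IO size up to a full fs block; shifting the byte
		 * address right by 9 gives the 512-byte sector number handed
		 * to submit_extent_page() below
		 */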
2954 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2955 sector = (em->block_start + extent_offset) >> 9;
2956 bdev = em->bdev;
2957 block_start = em->block_start;
Chris Masonc8b97812008-10-29 14:49:59 -04002958 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
Chris Masond1310b22008-01-24 16:13:08 -05002959 free_extent_map(em);
2960 em = NULL;
2961
Chris Masonc8b97812008-10-29 14:49:59 -04002962 /*
2963 * compressed and inline extents are written through other
2964 * paths in the FS
2965 */
2966 if (compressed || block_start == EXTENT_MAP_HOLE ||
Chris Masond1310b22008-01-24 16:13:08 -05002967 block_start == EXTENT_MAP_INLINE) {
Chris Masonc8b97812008-10-29 14:49:59 -04002968 /*
2969 * end_io notification does not happen here for
2970 * compressed extents
2971 */
2972 if (!compressed && tree->ops &&
2973 tree->ops->writepage_end_io_hook)
Chris Masone6dcd2d2008-07-17 12:53:50 -04002974 tree->ops->writepage_end_io_hook(page, cur,
2975 cur + iosize - 1,
2976 NULL, 1);
Chris Masonc8b97812008-10-29 14:49:59 -04002977 else if (compressed) {
2978 /* we don't want to end_page_writeback on
2979 * a compressed extent. this happens
2980 * elsewhere
2981 */
2982 nr++;
2983 }
2984
2985 cur += iosize;
Chris Mason7f3c74f2008-07-18 12:01:11 -04002986 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05002987 continue;
2988 }
Chris Masond1310b22008-01-24 16:13:08 -05002989 /* leave this out until we have a page_mkwrite call */
2990 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
Chris Mason9655d292009-09-02 15:22:30 -04002991 EXTENT_DIRTY, 0, NULL)) {
Chris Masond1310b22008-01-24 16:13:08 -05002992 cur = cur + iosize;
Chris Mason7f3c74f2008-07-18 12:01:11 -04002993 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05002994 continue;
2995 }
Chris Masonc8b97812008-10-29 14:49:59 -04002996
Chris Masond1310b22008-01-24 16:13:08 -05002997 if (tree->ops && tree->ops->writepage_io_hook) {
2998 ret = tree->ops->writepage_io_hook(page, cur,
2999 cur + iosize - 1);
3000 } else {
3001 ret = 0;
3002 }
Chris Mason1259ab72008-05-12 13:39:03 -04003003 if (ret) {
Chris Masond1310b22008-01-24 16:13:08 -05003004 SetPageError(page);
Chris Mason1259ab72008-05-12 13:39:03 -04003005 } else {
Chris Masond1310b22008-01-24 16:13:08 -05003006 unsigned long max_nr = end_index + 1;
Chris Mason7f3c74f2008-07-18 12:01:11 -04003007
Chris Masond1310b22008-01-24 16:13:08 -05003008 set_range_writeback(tree, cur, cur + iosize - 1);
3009 if (!PageWriteback(page)) {
Chris Masond3977122009-01-05 21:25:51 -05003010 printk(KERN_ERR "btrfs warning page %lu not "
3011 "writeback, cur %llu end %llu\n",
3012 page->index, (unsigned long long)cur,
Chris Masond1310b22008-01-24 16:13:08 -05003013 (unsigned long long)end);
3014 }
3015
Chris Masonffbd5172009-04-20 15:50:09 -04003016 ret = submit_extent_page(write_flags, tree, page,
3017 sector, iosize, pg_offset,
3018 bdev, &epd->bio, max_nr,
Chris Masonc8b97812008-10-29 14:49:59 -04003019 end_bio_extent_writepage,
3020 0, 0, 0);
Chris Masond1310b22008-01-24 16:13:08 -05003021 if (ret)
3022 SetPageError(page);
3023 }
3024 cur = cur + iosize;
Chris Mason7f3c74f2008-07-18 12:01:11 -04003025 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05003026 nr++;
3027 }
3028done:
3029 if (nr == 0) {
3030 /* make sure the mapping tag for page dirty gets cleared */
3031 set_page_writeback(page);
3032 end_page_writeback(page);
3033 }
Chris Masond1310b22008-01-24 16:13:08 -05003034 unlock_page(page);
Chris Mason771ed682008-11-06 22:02:51 -05003035
Chris Mason11c83492009-04-20 15:50:09 -04003036done_unlocked:
3037
Chris Mason2c64c532009-09-02 15:04:12 -04003038 /* drop our reference on any cached states */
3039 free_extent_state(cached_state);
Chris Masond1310b22008-01-24 16:13:08 -05003040 return 0;
3041}
3042
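/*
 * wait_on_bit() action for extent buffer writeback: just yield the CPU
 * until whoever clears EXTENT_BUFFER_WRITEBACK wakes the waiters
 */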
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003043static int eb_wait(void *word)
3044{
3045 io_schedule();
3046 return 0;
3047}
3048
3049static void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
3050{
3051 wait_on_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK, eb_wait,
3052 TASK_UNINTERRUPTIBLE);
3053}
3054
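/*
 * take the tree write lock on @eb, wait out any writeback already in
 * flight and, if the buffer is dirty, clear the dirty bit, mark it
 * EXTENT_BUFFER_WRITEBACK and lock all of its pages.  Returns 1 when
 * the buffer needs to be written, 0 when there is nothing to do.
 */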
3055static int lock_extent_buffer_for_io(struct extent_buffer *eb,
3056 struct btrfs_fs_info *fs_info,
3057 struct extent_page_data *epd)
3058{
3059 unsigned long i, num_pages;
3060 int flush = 0;
3061 int ret = 0;
3062
3063 if (!btrfs_try_tree_write_lock(eb)) {
3064 flush = 1;
3065 flush_write_bio(epd);
3066 btrfs_tree_lock(eb);
3067 }
3068
3069 if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
3070 btrfs_tree_unlock(eb);
3071 if (!epd->sync_io)
3072 return 0;
3073 if (!flush) {
3074 flush_write_bio(epd);
3075 flush = 1;
3076 }
Chris Masona098d8e2012-03-21 12:09:56 -04003077 while (1) {
3078 wait_on_extent_buffer_writeback(eb);
3079 btrfs_tree_lock(eb);
3080 if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags))
3081 break;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003082 btrfs_tree_unlock(eb);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003083 }
3084 }
3085
3086 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
3087 set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3088 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
3089 spin_lock(&fs_info->delalloc_lock);
3090 if (fs_info->dirty_metadata_bytes >= eb->len)
3091 fs_info->dirty_metadata_bytes -= eb->len;
3092 else
3093 WARN_ON(1);
3094 spin_unlock(&fs_info->delalloc_lock);
3095 ret = 1;
3096 }
3097
3098 btrfs_tree_unlock(eb);
3099
3100 if (!ret)
3101 return ret;
3102
3103 num_pages = num_extent_pages(eb->start, eb->len);
3104 for (i = 0; i < num_pages; i++) {
3105 struct page *p = extent_buffer_page(eb, i);
3106
3107 if (!trylock_page(p)) {
3108 if (!flush) {
3109 flush_write_bio(epd);
3110 flush = 1;
3111 }
3112 lock_page(p);
3113 }
3114 }
3115
3116 return ret;
3117}
3118
3119static void end_extent_buffer_writeback(struct extent_buffer *eb)
3120{
3121 clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3122 smp_mb__after_clear_bit();
3123 wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
3124}
3125
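/*
 * bio completion for btree writeback: walk the bio_vecs backwards,
 * drop io_pages for each extent buffer page and end page writeback;
 * the final page also clears EXTENT_BUFFER_WRITEBACK via
 * end_extent_buffer_writeback()
 */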
3126static void end_bio_extent_buffer_writepage(struct bio *bio, int err)
3127{
3128 int uptodate = err == 0;
3129 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
3130 struct extent_buffer *eb;
3131 int done;
3132
3133 do {
3134 struct page *page = bvec->bv_page;
3135
3136 bvec--;
3137 eb = (struct extent_buffer *)page->private;
3138 BUG_ON(!eb);
3139 done = atomic_dec_and_test(&eb->io_pages);
3140
3141 if (!uptodate || test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
3142 set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3143 ClearPageUptodate(page);
3144 SetPageError(page);
3145 }
3146
3147 end_page_writeback(page);
3148
3149 if (!done)
3150 continue;
3151
3152 end_extent_buffer_writeback(eb);
3153 } while (bvec >= bio->bi_io_vec);
3154
3155 bio_put(bio);
3156
3157}
3158
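/*
 * submit every page of a single extent buffer for writeback.  If a
 * submission fails, the remaining pages are unlocked and io_pages is
 * dropped so the buffer's writeback state is still cleared once any
 * pages already in flight complete.
 */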
3159static int write_one_eb(struct extent_buffer *eb,
3160 struct btrfs_fs_info *fs_info,
3161 struct writeback_control *wbc,
3162 struct extent_page_data *epd)
3163{
3164 struct block_device *bdev = fs_info->fs_devices->latest_bdev;
3165 u64 offset = eb->start;
3166 unsigned long i, num_pages;
3167 int rw = (epd->sync_io ? WRITE_SYNC : WRITE);
3168 int ret;
3169
3170 clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3171 num_pages = num_extent_pages(eb->start, eb->len);
3172 atomic_set(&eb->io_pages, num_pages);
3173 for (i = 0; i < num_pages; i++) {
3174 struct page *p = extent_buffer_page(eb, i);
3175
3176 clear_page_dirty_for_io(p);
3177 set_page_writeback(p);
3178 ret = submit_extent_page(rw, eb->tree, p, offset >> 9,
3179 PAGE_CACHE_SIZE, 0, bdev, &epd->bio,
3180 -1, end_bio_extent_buffer_writepage,
3181 0, 0, 0);
3182 if (ret) {
3183 set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3184 SetPageError(p);
3185 if (atomic_sub_and_test(num_pages - i, &eb->io_pages))
3186 end_extent_buffer_writeback(eb);
3187 ret = -EIO;
3188 break;
3189 }
3190 offset += PAGE_CACHE_SIZE;
3191 update_nr_written(p, wbc, 1);
3192 unlock_page(p);
3193 }
3194
3195 if (unlikely(ret)) {
3196 for (; i < num_pages; i++) {
3197 struct page *p = extent_buffer_page(eb, i);
3198 unlock_page(p);
3199 }
3200 }
3201
3202 return ret;
3203}
3204
3205int btree_write_cache_pages(struct address_space *mapping,
3206 struct writeback_control *wbc)
3207{
3208 struct extent_io_tree *tree = &BTRFS_I(mapping->host)->io_tree;
3209 struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
3210 struct extent_buffer *eb, *prev_eb = NULL;
3211 struct extent_page_data epd = {
3212 .bio = NULL,
3213 .tree = tree,
3214 .extent_locked = 0,
3215 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
3216 };
3217 int ret = 0;
3218 int done = 0;
3219 int nr_to_write_done = 0;
3220 struct pagevec pvec;
3221 int nr_pages;
3222 pgoff_t index;
3223 pgoff_t end; /* Inclusive */
3224 int scanned = 0;
3225 int tag;
3226
3227 pagevec_init(&pvec, 0);
3228 if (wbc->range_cyclic) {
3229 index = mapping->writeback_index; /* Start from prev offset */
3230 end = -1;
3231 } else {
3232 index = wbc->range_start >> PAGE_CACHE_SHIFT;
3233 end = wbc->range_end >> PAGE_CACHE_SHIFT;
3234 scanned = 1;
3235 }
3236 if (wbc->sync_mode == WB_SYNC_ALL)
3237 tag = PAGECACHE_TAG_TOWRITE;
3238 else
3239 tag = PAGECACHE_TAG_DIRTY;
3240retry:
3241 if (wbc->sync_mode == WB_SYNC_ALL)
3242 tag_pages_for_writeback(mapping, index, end);
3243 while (!done && !nr_to_write_done && (index <= end) &&
3244 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
3245 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
3246 unsigned i;
3247
3248 scanned = 1;
3249 for (i = 0; i < nr_pages; i++) {
3250 struct page *page = pvec.pages[i];
3251
3252 if (!PagePrivate(page))
3253 continue;
3254
3255 if (!wbc->range_cyclic && page->index > end) {
3256 done = 1;
3257 break;
3258 }
3259
3260 eb = (struct extent_buffer *)page->private;
3261 if (!eb) {
3262 WARN_ON(1);
3263 continue;
3264 }
3265
3266 if (eb == prev_eb)
3267 continue;
3268
3269 if (!atomic_inc_not_zero(&eb->refs)) {
3270 WARN_ON(1);
3271 continue;
3272 }
3273
3274 prev_eb = eb;
3275 ret = lock_extent_buffer_for_io(eb, fs_info, &epd);
3276 if (!ret) {
3277 free_extent_buffer(eb);
3278 continue;
3279 }
3280
3281 ret = write_one_eb(eb, fs_info, wbc, &epd);
3282 if (ret) {
3283 done = 1;
3284 free_extent_buffer(eb);
3285 break;
3286 }
3287 free_extent_buffer(eb);
3288
3289 /*
3290 * the filesystem may choose to bump up nr_to_write.
3291 * We have to make sure to honor the new nr_to_write
3292 * at any time
3293 */
3294 nr_to_write_done = wbc->nr_to_write <= 0;
3295 }
3296 pagevec_release(&pvec);
3297 cond_resched();
3298 }
3299 if (!scanned && !done) {
3300 /*
3301 * We hit the last page and there is more work to be done: wrap
3302 * back to the start of the file
3303 */
3304 scanned = 1;
3305 index = 0;
3306 goto retry;
3307 }
3308 flush_write_bio(&epd);
3309 return ret;
3310}
3311
Chris Masond1310b22008-01-24 16:13:08 -05003312/**
Chris Mason4bef0842008-09-08 11:18:08 -04003313 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
Chris Masond1310b22008-01-24 16:13:08 -05003314 * @mapping: address space structure to write
3315 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
3316 * @writepage: function called for each page
3317 * @data: data passed to writepage function
3318 *
3319 * If a page is already under I/O, write_cache_pages() skips it, even
3320 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
3321 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
3322 * and msync() need to guarantee that all the data which was dirty at the time
3323 * the call was made get new I/O started against them. If wbc->sync_mode is
3324 * WB_SYNC_ALL then we were called for data integrity and we must wait for
3325 * existing IO to complete.
3326 */
Christoph Hellwigb2950862008-12-02 09:54:17 -05003327static int extent_write_cache_pages(struct extent_io_tree *tree,
Chris Mason4bef0842008-09-08 11:18:08 -04003328 struct address_space *mapping,
3329 struct writeback_control *wbc,
Chris Masond2c3f4f2008-11-19 12:44:22 -05003330 writepage_t writepage, void *data,
3331 void (*flush_fn)(void *))
Chris Masond1310b22008-01-24 16:13:08 -05003332{
Chris Masond1310b22008-01-24 16:13:08 -05003333 int ret = 0;
3334 int done = 0;
Chris Masonf85d7d6c2009-09-18 16:03:16 -04003335 int nr_to_write_done = 0;
Chris Masond1310b22008-01-24 16:13:08 -05003336 struct pagevec pvec;
3337 int nr_pages;
3338 pgoff_t index;
3339 pgoff_t end; /* Inclusive */
3340 int scanned = 0;
Josef Bacikf7aaa062011-07-15 21:26:38 +00003341 int tag;
Chris Masond1310b22008-01-24 16:13:08 -05003342
Chris Masond1310b22008-01-24 16:13:08 -05003343 pagevec_init(&pvec, 0);
3344 if (wbc->range_cyclic) {
3345 index = mapping->writeback_index; /* Start from prev offset */
3346 end = -1;
3347 } else {
3348 index = wbc->range_start >> PAGE_CACHE_SHIFT;
3349 end = wbc->range_end >> PAGE_CACHE_SHIFT;
Chris Masond1310b22008-01-24 16:13:08 -05003350 scanned = 1;
3351 }
Josef Bacikf7aaa062011-07-15 21:26:38 +00003352 if (wbc->sync_mode == WB_SYNC_ALL)
3353 tag = PAGECACHE_TAG_TOWRITE;
3354 else
3355 tag = PAGECACHE_TAG_DIRTY;
Chris Masond1310b22008-01-24 16:13:08 -05003356retry:
Josef Bacikf7aaa062011-07-15 21:26:38 +00003357 if (wbc->sync_mode == WB_SYNC_ALL)
3358 tag_pages_for_writeback(mapping, index, end);
Chris Masonf85d7d6c2009-09-18 16:03:16 -04003359 while (!done && !nr_to_write_done && (index <= end) &&
Josef Bacikf7aaa062011-07-15 21:26:38 +00003360 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
3361 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
Chris Masond1310b22008-01-24 16:13:08 -05003362 unsigned i;
3363
3364 scanned = 1;
3365 for (i = 0; i < nr_pages; i++) {
3366 struct page *page = pvec.pages[i];
3367
3368 /*
3369 * At this point we hold neither mapping->tree_lock nor
3370 * lock on the page itself: the page may be truncated or
3371 * invalidated (changing page->mapping to NULL), or even
3372 * swizzled back from swapper_space to tmpfs file
3373 * mapping
3374 */
Chris Mason01d658f2011-11-01 10:08:06 -04003375 if (tree->ops &&
3376 tree->ops->write_cache_pages_lock_hook) {
3377 tree->ops->write_cache_pages_lock_hook(page,
3378 data, flush_fn);
3379 } else {
3380 if (!trylock_page(page)) {
3381 flush_fn(data);
3382 lock_page(page);
3383 }
3384 }
Chris Masond1310b22008-01-24 16:13:08 -05003385
3386 if (unlikely(page->mapping != mapping)) {
3387 unlock_page(page);
3388 continue;
3389 }
3390
3391 if (!wbc->range_cyclic && page->index > end) {
3392 done = 1;
3393 unlock_page(page);
3394 continue;
3395 }
3396
Chris Masond2c3f4f2008-11-19 12:44:22 -05003397 if (wbc->sync_mode != WB_SYNC_NONE) {
Chris Mason0e6bd952008-11-20 10:46:35 -05003398 if (PageWriteback(page))
3399 flush_fn(data);
Chris Masond1310b22008-01-24 16:13:08 -05003400 wait_on_page_writeback(page);
Chris Masond2c3f4f2008-11-19 12:44:22 -05003401 }
Chris Masond1310b22008-01-24 16:13:08 -05003402
3403 if (PageWriteback(page) ||
3404 !clear_page_dirty_for_io(page)) {
3405 unlock_page(page);
3406 continue;
3407 }
3408
3409 ret = (*writepage)(page, wbc, data);
3410
3411 if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
3412 unlock_page(page);
3413 ret = 0;
3414 }
Chris Masonf85d7d6c2009-09-18 16:03:16 -04003415 if (ret)
Chris Masond1310b22008-01-24 16:13:08 -05003416 done = 1;
Chris Masonf85d7d6c2009-09-18 16:03:16 -04003417
3418 /*
3419 * the filesystem may choose to bump up nr_to_write.
3420 * We have to make sure to honor the new nr_to_write
3421 * at any time
3422 */
3423 nr_to_write_done = wbc->nr_to_write <= 0;
Chris Masond1310b22008-01-24 16:13:08 -05003424 }
3425 pagevec_release(&pvec);
3426 cond_resched();
3427 }
3428 if (!scanned && !done) {
3429 /*
3430 * We hit the last page and there is more work to be done: wrap
3431 * back to the start of the file
3432 */
3433 scanned = 1;
3434 index = 0;
3435 goto retry;
3436 }
Chris Masond1310b22008-01-24 16:13:08 -05003437 return ret;
3438}
Chris Masond1310b22008-01-24 16:13:08 -05003439
Chris Masonffbd5172009-04-20 15:50:09 -04003440static void flush_epd_write_bio(struct extent_page_data *epd)
3441{
3442 if (epd->bio) {
Jeff Mahoney355808c2011-10-03 23:23:14 -04003443 int rw = WRITE;
3444 int ret;
3445
Chris Masonffbd5172009-04-20 15:50:09 -04003446 if (epd->sync_io)
Jeff Mahoney355808c2011-10-03 23:23:14 -04003447 rw = WRITE_SYNC;
3448
3449 ret = submit_one_bio(rw, epd->bio, 0, 0);
Jeff Mahoney79787ea2012-03-12 16:03:00 +01003450 BUG_ON(ret < 0); /* -ENOMEM */
Chris Masonffbd5172009-04-20 15:50:09 -04003451 epd->bio = NULL;
3452 }
3453}
3454
Chris Masond2c3f4f2008-11-19 12:44:22 -05003455static noinline void flush_write_bio(void *data)
3456{
3457 struct extent_page_data *epd = data;
Chris Masonffbd5172009-04-20 15:50:09 -04003458 flush_epd_write_bio(epd);
Chris Masond2c3f4f2008-11-19 12:44:22 -05003459}
3460
Chris Masond1310b22008-01-24 16:13:08 -05003461int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
3462 get_extent_t *get_extent,
3463 struct writeback_control *wbc)
3464{
3465 int ret;
Chris Masond1310b22008-01-24 16:13:08 -05003466 struct extent_page_data epd = {
3467 .bio = NULL,
3468 .tree = tree,
3469 .get_extent = get_extent,
Chris Mason771ed682008-11-06 22:02:51 -05003470 .extent_locked = 0,
Chris Masonffbd5172009-04-20 15:50:09 -04003471 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
Chris Masond1310b22008-01-24 16:13:08 -05003472 };
Chris Masond1310b22008-01-24 16:13:08 -05003473
Chris Masond1310b22008-01-24 16:13:08 -05003474 ret = __extent_writepage(page, wbc, &epd);
3475
Chris Masonffbd5172009-04-20 15:50:09 -04003476 flush_epd_write_bio(&epd);
Chris Masond1310b22008-01-24 16:13:08 -05003477 return ret;
3478}
Chris Masond1310b22008-01-24 16:13:08 -05003479
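/*
 * write out a range of pages whose extent range the caller has already
 * locked: extent_locked is set so __extent_writepage() skips the
 * delalloc handling, and pages with no dirty data are handed straight
 * to the writepage_end_io_hook
 */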
Chris Mason771ed682008-11-06 22:02:51 -05003480int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
3481 u64 start, u64 end, get_extent_t *get_extent,
3482 int mode)
3483{
3484 int ret = 0;
3485 struct address_space *mapping = inode->i_mapping;
3486 struct page *page;
3487 unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >>
3488 PAGE_CACHE_SHIFT;
3489
3490 struct extent_page_data epd = {
3491 .bio = NULL,
3492 .tree = tree,
3493 .get_extent = get_extent,
3494 .extent_locked = 1,
Chris Masonffbd5172009-04-20 15:50:09 -04003495 .sync_io = mode == WB_SYNC_ALL,
Chris Mason771ed682008-11-06 22:02:51 -05003496 };
3497 struct writeback_control wbc_writepages = {
Chris Mason771ed682008-11-06 22:02:51 -05003498 .sync_mode = mode,
Chris Mason771ed682008-11-06 22:02:51 -05003499 .nr_to_write = nr_pages * 2,
3500 .range_start = start,
3501 .range_end = end + 1,
3502 };
3503
Chris Masond3977122009-01-05 21:25:51 -05003504 while (start <= end) {
Chris Mason771ed682008-11-06 22:02:51 -05003505 page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
3506 if (clear_page_dirty_for_io(page))
3507 ret = __extent_writepage(page, &wbc_writepages, &epd);
3508 else {
3509 if (tree->ops && tree->ops->writepage_end_io_hook)
3510 tree->ops->writepage_end_io_hook(page, start,
3511 start + PAGE_CACHE_SIZE - 1,
3512 NULL, 1);
3513 unlock_page(page);
3514 }
3515 page_cache_release(page);
3516 start += PAGE_CACHE_SIZE;
3517 }
3518
Chris Masonffbd5172009-04-20 15:50:09 -04003519 flush_epd_write_bio(&epd);
Chris Mason771ed682008-11-06 22:02:51 -05003520 return ret;
3521}
Chris Masond1310b22008-01-24 16:13:08 -05003522
3523int extent_writepages(struct extent_io_tree *tree,
3524 struct address_space *mapping,
3525 get_extent_t *get_extent,
3526 struct writeback_control *wbc)
3527{
3528 int ret = 0;
3529 struct extent_page_data epd = {
3530 .bio = NULL,
3531 .tree = tree,
3532 .get_extent = get_extent,
Chris Mason771ed682008-11-06 22:02:51 -05003533 .extent_locked = 0,
Chris Masonffbd5172009-04-20 15:50:09 -04003534 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
Chris Masond1310b22008-01-24 16:13:08 -05003535 };
3536
Chris Mason4bef0842008-09-08 11:18:08 -04003537 ret = extent_write_cache_pages(tree, mapping, wbc,
Chris Masond2c3f4f2008-11-19 12:44:22 -05003538 __extent_writepage, &epd,
3539 flush_write_bio);
Chris Masonffbd5172009-04-20 15:50:09 -04003540 flush_epd_write_bio(&epd);
Chris Masond1310b22008-01-24 16:13:08 -05003541 return ret;
3542}
Chris Masond1310b22008-01-24 16:13:08 -05003543
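/*
 * readahead entry point: add each page to the page cache and read it
 * through __extent_read_full_page(), batching the reads into a single
 * bio where possible
 */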
3544int extent_readpages(struct extent_io_tree *tree,
3545 struct address_space *mapping,
3546 struct list_head *pages, unsigned nr_pages,
3547 get_extent_t get_extent)
3548{
3549 struct bio *bio = NULL;
3550 unsigned page_idx;
Chris Masonc8b97812008-10-29 14:49:59 -04003551 unsigned long bio_flags = 0;
Chris Masond1310b22008-01-24 16:13:08 -05003552
Chris Masond1310b22008-01-24 16:13:08 -05003553 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
3554 struct page *page = list_entry(pages->prev, struct page, lru);
3555
3556 prefetchw(&page->flags);
3557 list_del(&page->lru);
Nick Piggin28ecb6092010-03-17 13:31:04 +00003558 if (!add_to_page_cache_lru(page, mapping,
Itaru Kitayama43e817a2011-04-25 19:43:51 -04003559 page->index, GFP_NOFS)) {
Chris Masonf1885912008-04-09 16:28:12 -04003560 __extent_read_full_page(tree, page, get_extent,
Chris Masonc8b97812008-10-29 14:49:59 -04003561 &bio, 0, &bio_flags);
Chris Masond1310b22008-01-24 16:13:08 -05003562 }
3563 page_cache_release(page);
3564 }
Chris Masond1310b22008-01-24 16:13:08 -05003565 BUG_ON(!list_empty(pages));
3566 if (bio)
Jeff Mahoney79787ea2012-03-12 16:03:00 +01003567 return submit_one_bio(READ, bio, 0, bio_flags);
Chris Masond1310b22008-01-24 16:13:08 -05003568 return 0;
3569}
Chris Masond1310b22008-01-24 16:13:08 -05003570
3571/*
3572 * basic invalidatepage code, this waits on any locked or writeback
3573 * ranges corresponding to the page, and then deletes any extent state
3574 * records from the tree
3575 */
3576int extent_invalidatepage(struct extent_io_tree *tree,
3577 struct page *page, unsigned long offset)
3578{
Josef Bacik2ac55d42010-02-03 19:33:23 +00003579 struct extent_state *cached_state = NULL;
Chris Masond1310b22008-01-24 16:13:08 -05003580 u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
3581 u64 end = start + PAGE_CACHE_SIZE - 1;
3582 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
3583
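	/*
	 * round the invalidation offset up to the next block boundary so
	 * state is only cleared for blocks wholly inside the invalidated
	 * range; e.g. with 4K blocks (illustrative size) an offset of 1
	 * advances start by a full block
	 */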
Chris Masond3977122009-01-05 21:25:51 -05003584 start += (offset + blocksize - 1) & ~(blocksize - 1);
Chris Masond1310b22008-01-24 16:13:08 -05003585 if (start > end)
3586 return 0;
3587
Jeff Mahoneyd0082372012-03-01 14:57:19 +01003588 lock_extent_bits(tree, start, end, 0, &cached_state);
Chris Mason1edbb732009-09-02 13:24:36 -04003589 wait_on_page_writeback(page);
Chris Masond1310b22008-01-24 16:13:08 -05003590 clear_extent_bit(tree, start, end,
Josef Bacik32c00af2009-10-08 13:34:05 -04003591 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
3592 EXTENT_DO_ACCOUNTING,
Josef Bacik2ac55d42010-02-03 19:33:23 +00003593 1, 1, &cached_state, GFP_NOFS);
Chris Masond1310b22008-01-24 16:13:08 -05003594 return 0;
3595}
Chris Masond1310b22008-01-24 16:13:08 -05003596
3597/*
Chris Mason7b13b7b2008-04-18 10:29:50 -04003598 * a helper for releasepage, this tests for areas of the page that
3599 * are locked or under IO and drops the related state bits if it is safe
3600 * to drop the page.
3601 */
3602int try_release_extent_state(struct extent_map_tree *map,
3603 struct extent_io_tree *tree, struct page *page,
3604 gfp_t mask)
3605{
3606 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
3607 u64 end = start + PAGE_CACHE_SIZE - 1;
3608 int ret = 1;
3609
Chris Mason211f90e2008-07-18 11:56:15 -04003610 if (test_range_bit(tree, start, end,
Chris Mason8b62b722009-09-02 16:53:46 -04003611 EXTENT_IOBITS, 0, NULL))
Chris Mason7b13b7b2008-04-18 10:29:50 -04003612 ret = 0;
3613 else {
3614 if ((mask & GFP_NOFS) == GFP_NOFS)
3615 mask = GFP_NOFS;
Chris Mason11ef1602009-09-23 20:28:46 -04003616 /*
3617 * at this point we can safely clear everything except the
3618 * locked bit and the nodatasum bit
3619 */
Chris Masone3f24cc2011-02-14 12:52:08 -05003620 ret = clear_extent_bit(tree, start, end,
Chris Mason11ef1602009-09-23 20:28:46 -04003621 ~(EXTENT_LOCKED | EXTENT_NODATASUM),
3622 0, 0, NULL, mask);
Chris Masone3f24cc2011-02-14 12:52:08 -05003623
3624 /* if clear_extent_bit failed for enomem reasons,
3625 * we can't allow the release to continue.
3626 */
3627 if (ret < 0)
3628 ret = 0;
3629 else
3630 ret = 1;
Chris Mason7b13b7b2008-04-18 10:29:50 -04003631 }
3632 return ret;
3633}
Chris Mason7b13b7b2008-04-18 10:29:50 -04003634
3635/*
Chris Masond1310b22008-01-24 16:13:08 -05003636 * a helper for releasepage. As long as there are no locked extents
3637 * in the range corresponding to the page, both state records and extent
3638 * map records are removed
3639 */
3640int try_release_extent_mapping(struct extent_map_tree *map,
Chris Mason70dec802008-01-29 09:59:12 -05003641 struct extent_io_tree *tree, struct page *page,
3642 gfp_t mask)
Chris Masond1310b22008-01-24 16:13:08 -05003643{
3644 struct extent_map *em;
3645 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
3646 u64 end = start + PAGE_CACHE_SIZE - 1;
Chris Mason7b13b7b2008-04-18 10:29:50 -04003647
Chris Mason70dec802008-01-29 09:59:12 -05003648 if ((mask & __GFP_WAIT) &&
3649 page->mapping->host->i_size > 16 * 1024 * 1024) {
Yan39b56372008-02-15 10:40:50 -05003650 u64 len;
Chris Mason70dec802008-01-29 09:59:12 -05003651 while (start <= end) {
Yan39b56372008-02-15 10:40:50 -05003652 len = end - start + 1;
Chris Mason890871b2009-09-02 16:24:52 -04003653 write_lock(&map->lock);
Yan39b56372008-02-15 10:40:50 -05003654 em = lookup_extent_mapping(map, start, len);
Tsutomu Itoh285190d2012-02-16 16:23:58 +09003655 if (!em) {
Chris Mason890871b2009-09-02 16:24:52 -04003656 write_unlock(&map->lock);
Chris Mason70dec802008-01-29 09:59:12 -05003657 break;
3658 }
Chris Mason7f3c74f2008-07-18 12:01:11 -04003659 if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
3660 em->start != start) {
Chris Mason890871b2009-09-02 16:24:52 -04003661 write_unlock(&map->lock);
Chris Mason70dec802008-01-29 09:59:12 -05003662 free_extent_map(em);
3663 break;
3664 }
3665 if (!test_range_bit(tree, em->start,
3666 extent_map_end(em) - 1,
Chris Mason8b62b722009-09-02 16:53:46 -04003667 EXTENT_LOCKED | EXTENT_WRITEBACK,
Chris Mason9655d292009-09-02 15:22:30 -04003668 0, NULL)) {
Chris Mason70dec802008-01-29 09:59:12 -05003669 remove_extent_mapping(map, em);
3670 /* once for the rb tree */
3671 free_extent_map(em);
3672 }
3673 start = extent_map_end(em);
Chris Mason890871b2009-09-02 16:24:52 -04003674 write_unlock(&map->lock);
Chris Mason70dec802008-01-29 09:59:12 -05003675
3676 /* once for us */
Chris Masond1310b22008-01-24 16:13:08 -05003677 free_extent_map(em);
3678 }
Chris Masond1310b22008-01-24 16:13:08 -05003679 }
Chris Mason7b13b7b2008-04-18 10:29:50 -04003680 return try_release_extent_state(map, tree, page, mask);
Chris Masond1310b22008-01-24 16:13:08 -05003681}
Chris Masond1310b22008-01-24 16:13:08 -05003682
Chris Masonec29ed52011-02-23 16:23:20 -05003683/*
3684 * helper function for fiemap, which doesn't want to see any holes.
3685 * This maps until we find something past 'last'
3686 */
3687static struct extent_map *get_extent_skip_holes(struct inode *inode,
3688 u64 offset,
3689 u64 last,
3690 get_extent_t *get_extent)
3691{
3692 u64 sectorsize = BTRFS_I(inode)->root->sectorsize;
3693 struct extent_map *em;
3694 u64 len;
3695
3696 if (offset >= last)
3697 return NULL;
3698
 3699	while (1) {
3700 len = last - offset;
3701 if (len == 0)
3702 break;
3703 len = (len + sectorsize - 1) & ~(sectorsize - 1);
3704 em = get_extent(inode, NULL, 0, offset, len, 0);
David Sterbac7040052011-04-19 18:00:01 +02003705 if (IS_ERR_OR_NULL(em))
Chris Masonec29ed52011-02-23 16:23:20 -05003706 return em;
3707
3708 /* if this isn't a hole return it */
3709 if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) &&
3710 em->block_start != EXTENT_MAP_HOLE) {
3711 return em;
3712 }
3713
3714 /* this is a hole, advance to the next extent */
3715 offset = extent_map_end(em);
3716 free_extent_map(em);
3717 if (offset >= last)
3718 break;
3719 }
3720 return NULL;
3721}
3722
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003723int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3724 __u64 start, __u64 len, get_extent_t *get_extent)
3725{
Josef Bacik975f84f2010-11-23 19:36:57 +00003726 int ret = 0;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003727 u64 off = start;
3728 u64 max = start + len;
3729 u32 flags = 0;
Josef Bacik975f84f2010-11-23 19:36:57 +00003730 u32 found_type;
3731 u64 last;
Chris Masonec29ed52011-02-23 16:23:20 -05003732 u64 last_for_get_extent = 0;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003733 u64 disko = 0;
Chris Masonec29ed52011-02-23 16:23:20 -05003734 u64 isize = i_size_read(inode);
Josef Bacik975f84f2010-11-23 19:36:57 +00003735 struct btrfs_key found_key;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003736 struct extent_map *em = NULL;
Josef Bacik2ac55d42010-02-03 19:33:23 +00003737 struct extent_state *cached_state = NULL;
Josef Bacik975f84f2010-11-23 19:36:57 +00003738 struct btrfs_path *path;
3739 struct btrfs_file_extent_item *item;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003740 int end = 0;
Chris Masonec29ed52011-02-23 16:23:20 -05003741 u64 em_start = 0;
3742 u64 em_len = 0;
3743 u64 em_end = 0;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003744 unsigned long emflags;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003745
3746 if (len == 0)
3747 return -EINVAL;
3748
Josef Bacik975f84f2010-11-23 19:36:57 +00003749 path = btrfs_alloc_path();
3750 if (!path)
3751 return -ENOMEM;
3752 path->leave_spinning = 1;
3753
Josef Bacik4d479cf2011-11-17 11:34:31 -05003754 start = ALIGN(start, BTRFS_I(inode)->root->sectorsize);
3755 len = ALIGN(len, BTRFS_I(inode)->root->sectorsize);
3756
Chris Masonec29ed52011-02-23 16:23:20 -05003757 /*
3758 * lookup the last file extent. We're not using i_size here
3759 * because there might be preallocation past i_size
3760 */
Josef Bacik975f84f2010-11-23 19:36:57 +00003761 ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root,
Li Zefan33345d012011-04-20 10:31:50 +08003762 path, btrfs_ino(inode), -1, 0);
Josef Bacik975f84f2010-11-23 19:36:57 +00003763 if (ret < 0) {
3764 btrfs_free_path(path);
3765 return ret;
3766 }
3767 WARN_ON(!ret);
3768 path->slots[0]--;
3769 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3770 struct btrfs_file_extent_item);
3771 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
3772 found_type = btrfs_key_type(&found_key);
3773
Chris Masonec29ed52011-02-23 16:23:20 -05003774 /* No extents, but there might be delalloc bits */
Li Zefan33345d012011-04-20 10:31:50 +08003775 if (found_key.objectid != btrfs_ino(inode) ||
Josef Bacik975f84f2010-11-23 19:36:57 +00003776 found_type != BTRFS_EXTENT_DATA_KEY) {
Chris Masonec29ed52011-02-23 16:23:20 -05003777 /* have to trust i_size as the end */
3778 last = (u64)-1;
3779 last_for_get_extent = isize;
3780 } else {
3781 /*
3782 * remember the start of the last extent. There are a
3783 * bunch of different factors that go into the length of the
 3784	 * extent, so it's much less complex to remember where it started
3785 */
3786 last = found_key.offset;
3787 last_for_get_extent = last + 1;
Josef Bacik975f84f2010-11-23 19:36:57 +00003788 }
Josef Bacik975f84f2010-11-23 19:36:57 +00003789 btrfs_free_path(path);
3790
Chris Masonec29ed52011-02-23 16:23:20 -05003791 /*
3792 * we might have some extents allocated but more delalloc past those
3793 * extents. so, we trust isize unless the start of the last extent is
3794 * beyond isize
3795 */
3796 if (last < isize) {
3797 last = (u64)-1;
3798 last_for_get_extent = isize;
3799 }
3800
Josef Bacik2ac55d42010-02-03 19:33:23 +00003801 lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
Jeff Mahoneyd0082372012-03-01 14:57:19 +01003802 &cached_state);
Chris Masonec29ed52011-02-23 16:23:20 -05003803
Josef Bacik4d479cf2011-11-17 11:34:31 -05003804 em = get_extent_skip_holes(inode, start, last_for_get_extent,
Chris Masonec29ed52011-02-23 16:23:20 -05003805 get_extent);
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003806 if (!em)
3807 goto out;
3808 if (IS_ERR(em)) {
3809 ret = PTR_ERR(em);
3810 goto out;
3811 }
Josef Bacik975f84f2010-11-23 19:36:57 +00003812
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003813 while (!end) {
Chris Masonea8efc72011-03-08 11:54:40 -05003814 u64 offset_in_extent;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003815
Chris Masonea8efc72011-03-08 11:54:40 -05003816 /* break if the extent we found is outside the range */
3817 if (em->start >= max || extent_map_end(em) < off)
3818 break;
3819
3820 /*
3821 * get_extent may return an extent that starts before our
3822 * requested range. We have to make sure the ranges
3823 * we return to fiemap always move forward and don't
3824 * overlap, so adjust the offsets here
3825 */
3826 em_start = max(em->start, off);
3827
3828 /*
3829 * record the offset from the start of the extent
3830 * for adjusting the disk offset below
3831 */
3832 offset_in_extent = em_start - em->start;
Chris Masonec29ed52011-02-23 16:23:20 -05003833 em_end = extent_map_end(em);
Chris Masonea8efc72011-03-08 11:54:40 -05003834 em_len = em_end - em_start;
Chris Masonec29ed52011-02-23 16:23:20 -05003835 emflags = em->flags;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003836 disko = 0;
3837 flags = 0;
3838
Chris Masonea8efc72011-03-08 11:54:40 -05003839 /*
3840 * bump off for our next call to get_extent
3841 */
3842 off = extent_map_end(em);
3843 if (off >= max)
3844 end = 1;
3845
Heiko Carstens93dbfad2009-04-03 10:33:45 -04003846 if (em->block_start == EXTENT_MAP_LAST_BYTE) {
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003847 end = 1;
3848 flags |= FIEMAP_EXTENT_LAST;
Heiko Carstens93dbfad2009-04-03 10:33:45 -04003849 } else if (em->block_start == EXTENT_MAP_INLINE) {
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003850 flags |= (FIEMAP_EXTENT_DATA_INLINE |
3851 FIEMAP_EXTENT_NOT_ALIGNED);
Heiko Carstens93dbfad2009-04-03 10:33:45 -04003852 } else if (em->block_start == EXTENT_MAP_DELALLOC) {
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003853 flags |= (FIEMAP_EXTENT_DELALLOC |
3854 FIEMAP_EXTENT_UNKNOWN);
Heiko Carstens93dbfad2009-04-03 10:33:45 -04003855 } else {
Chris Masonea8efc72011-03-08 11:54:40 -05003856 disko = em->block_start + offset_in_extent;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003857 }
3858 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
3859 flags |= FIEMAP_EXTENT_ENCODED;
3860
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003861 free_extent_map(em);
3862 em = NULL;
Chris Masonec29ed52011-02-23 16:23:20 -05003863 if ((em_start >= last) || em_len == (u64)-1 ||
3864 (last == (u64)-1 && isize <= em_end)) {
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003865 flags |= FIEMAP_EXTENT_LAST;
3866 end = 1;
3867 }
3868
Chris Masonec29ed52011-02-23 16:23:20 -05003869 /* now scan forward to see if this is really the last extent. */
3870 em = get_extent_skip_holes(inode, off, last_for_get_extent,
3871 get_extent);
3872 if (IS_ERR(em)) {
3873 ret = PTR_ERR(em);
3874 goto out;
3875 }
3876 if (!em) {
Josef Bacik975f84f2010-11-23 19:36:57 +00003877 flags |= FIEMAP_EXTENT_LAST;
3878 end = 1;
3879 }
Chris Masonec29ed52011-02-23 16:23:20 -05003880 ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
3881 em_len, flags);
3882 if (ret)
3883 goto out_free;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003884 }
3885out_free:
3886 free_extent_map(em);
3887out:
Josef Bacik2ac55d42010-02-03 19:33:23 +00003888 unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len,
3889 &cached_state, GFP_NOFS);
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003890 return ret;
3891}
3892
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02003893inline struct page *extent_buffer_page(struct extent_buffer *eb,
Chris Masond1310b22008-01-24 16:13:08 -05003894 unsigned long i)
3895{
Chris Mason727011e2010-08-06 13:21:20 -04003896 return eb->pages[i];
Chris Masond1310b22008-01-24 16:13:08 -05003897}
3898
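/*
 * e.g. with 4K pages (illustrative size), start == 6144 and len == 8192
 * touch pages 1-3: ((6144 + 8192 + 4095) >> 12) - (6144 >> 12) == 3
 */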
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02003899inline unsigned long num_extent_pages(u64 start, u64 len)
Chris Masonce9adaa2008-04-09 16:28:12 -04003900{
Chris Mason6af118c2008-07-22 11:18:07 -04003901 return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
3902 (start >> PAGE_CACHE_SHIFT);
Chris Mason728131d2008-04-09 16:28:12 -04003903}
3904
Chris Mason727011e2010-08-06 13:21:20 -04003905static void __free_extent_buffer(struct extent_buffer *eb)
3906{
3907#if LEAK_DEBUG
3908 unsigned long flags;
3909 spin_lock_irqsave(&leak_lock, flags);
3910 list_del(&eb->leak_list);
3911 spin_unlock_irqrestore(&leak_lock, flags);
3912#endif
3913 if (eb->pages && eb->pages != eb->inline_pages)
3914 kfree(eb->pages);
3915 kmem_cache_free(extent_buffer_cache, eb);
3916}
3917
Chris Masond1310b22008-01-24 16:13:08 -05003918static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
3919 u64 start,
3920 unsigned long len,
3921 gfp_t mask)
3922{
3923 struct extent_buffer *eb = NULL;
Chris Mason39351272009-02-04 09:24:05 -05003924#if LEAK_DEBUG
Chris Mason2d2ae542008-03-26 16:24:23 -04003925 unsigned long flags;
Chris Mason4bef0842008-09-08 11:18:08 -04003926#endif
Chris Masond1310b22008-01-24 16:13:08 -05003927
Chris Masond1310b22008-01-24 16:13:08 -05003928 eb = kmem_cache_zalloc(extent_buffer_cache, mask);
Tsutomu Itoh91ca3382011-01-05 02:32:22 +00003929 if (eb == NULL)
3930 return NULL;
Chris Masond1310b22008-01-24 16:13:08 -05003931 eb->start = start;
3932 eb->len = len;
Josef Bacik4f2de97a2012-03-07 16:20:05 -05003933 eb->tree = tree;
Chris Masonbd681512011-07-16 15:23:14 -04003934 rwlock_init(&eb->lock);
3935 atomic_set(&eb->write_locks, 0);
3936 atomic_set(&eb->read_locks, 0);
3937 atomic_set(&eb->blocking_readers, 0);
3938 atomic_set(&eb->blocking_writers, 0);
3939 atomic_set(&eb->spinning_readers, 0);
3940 atomic_set(&eb->spinning_writers, 0);
Arne Jansen5b25f702011-09-13 10:55:48 +02003941 eb->lock_nested = 0;
Chris Masonbd681512011-07-16 15:23:14 -04003942 init_waitqueue_head(&eb->write_lock_wq);
3943 init_waitqueue_head(&eb->read_lock_wq);
Chris Masonb4ce94d2009-02-04 09:25:08 -05003944
Chris Mason39351272009-02-04 09:24:05 -05003945#if LEAK_DEBUG
Chris Mason2d2ae542008-03-26 16:24:23 -04003946 spin_lock_irqsave(&leak_lock, flags);
3947 list_add(&eb->leak_list, &buffers);
3948 spin_unlock_irqrestore(&leak_lock, flags);
Chris Mason4bef0842008-09-08 11:18:08 -04003949#endif
Josef Bacik3083ee22012-03-09 16:01:49 -05003950 spin_lock_init(&eb->refs_lock);
Chris Masond1310b22008-01-24 16:13:08 -05003951 atomic_set(&eb->refs, 1);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003952 atomic_set(&eb->io_pages, 0);
Chris Mason727011e2010-08-06 13:21:20 -04003953
3954 if (len > MAX_INLINE_EXTENT_BUFFER_SIZE) {
3955 struct page **pages;
3956 int num_pages = (len + PAGE_CACHE_SIZE - 1) >>
3957 PAGE_CACHE_SHIFT;
3958 pages = kzalloc(num_pages, mask);
3959 if (!pages) {
3960 __free_extent_buffer(eb);
3961 return NULL;
3962 }
3963 eb->pages = pages;
3964 } else {
3965 eb->pages = eb->inline_pages;
3966 }
Chris Masond1310b22008-01-24 16:13:08 -05003967
3968 return eb;
3969}
3970
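/* true while the buffer still has I/O in flight or is dirty/under writeback */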
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003971static int extent_buffer_under_io(struct extent_buffer *eb)
Chris Masond1310b22008-01-24 16:13:08 -05003972{
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003973 return (atomic_read(&eb->io_pages) ||
3974 test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
3975 test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
Chris Masond1310b22008-01-24 16:13:08 -05003976}
3977
Miao Xie897ca6e2010-10-26 20:57:29 -04003978/*
3979 * Helper for releasing extent buffer page.
3980 */
3981static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
3982 unsigned long start_idx)
3983{
3984 unsigned long index;
3985 struct page *page;
3986
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003987 BUG_ON(extent_buffer_under_io(eb));
Miao Xie897ca6e2010-10-26 20:57:29 -04003988
3989 index = num_extent_pages(eb->start, eb->len);
3990 if (start_idx >= index)
3991 return;
3992
3993 do {
3994 index--;
3995 page = extent_buffer_page(eb, index);
Josef Bacik4f2de97a2012-03-07 16:20:05 -05003996 if (page) {
3997 spin_lock(&page->mapping->private_lock);
3998 /*
3999 * We do this since we'll remove the pages after we've
4000 * removed the eb from the radix tree, so we could race
4001 * and have this page now attached to the new eb. So
4002 * only clear page_private if it's still connected to
4003 * this eb.
4004 */
4005 if (PagePrivate(page) &&
4006 page->private == (unsigned long)eb) {
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004007 BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
Josef Bacik3083ee22012-03-09 16:01:49 -05004008 BUG_ON(PageDirty(page));
4009 BUG_ON(PageWriteback(page));
Josef Bacik4f2de97a2012-03-07 16:20:05 -05004010 /*
 4011			 * We need to make sure we haven't been attached
4012 * to a new eb.
4013 */
4014 ClearPagePrivate(page);
4015 set_page_private(page, 0);
4016 /* One for the page private */
4017 page_cache_release(page);
4018 }
4019 spin_unlock(&page->mapping->private_lock);
4020
4021 /* One for when we alloced the page */
Miao Xie897ca6e2010-10-26 20:57:29 -04004022 page_cache_release(page);
Josef Bacik4f2de97a2012-03-07 16:20:05 -05004023 }
Miao Xie897ca6e2010-10-26 20:57:29 -04004024 } while (index != start_idx);
4025}
4026
4027/*
4028 * Helper for releasing the extent buffer.
4029 */
4030static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
4031{
4032 btrfs_release_extent_buffer_page(eb, 0);
4033 __free_extent_buffer(eb);
4034}
4035
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004036static void check_buffer_tree_ref(struct extent_buffer *eb)
4037{
4038 /* the ref bit is tricky. We have to make sure it is set
4039 * if we have the buffer dirty. Otherwise the
4040 * code to free a buffer can end up dropping a dirty
4041 * page
4042 *
4043 * Once the ref bit is set, it won't go away while the
4044 * buffer is dirty or in writeback, and it also won't
4045 * go away while we have the reference count on the
4046 * eb bumped.
4047 *
4048 * We can't just set the ref bit without bumping the
4049 * ref on the eb because free_extent_buffer might
4050 * see the ref bit and try to clear it. If this happens
4051 * free_extent_buffer might end up dropping our original
4052 * ref by mistake and freeing the page before we are able
4053 * to add one more ref.
4054 *
4055 * So bump the ref count first, then set the bit. If someone
4056 * beat us to it, drop the ref we added.
4057 */
4058 if (!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
4059 atomic_inc(&eb->refs);
4060 if (test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4061 atomic_dec(&eb->refs);
4062 }
4063}
4064
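/* make sure the buffer holds its tree ref and tell the VM each backing page was recently used */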
Josef Bacik5df42352012-03-15 18:24:42 -04004065static void mark_extent_buffer_accessed(struct extent_buffer *eb)
4066{
4067 unsigned long num_pages, i;
4068
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004069 check_buffer_tree_ref(eb);
4070
Josef Bacik5df42352012-03-15 18:24:42 -04004071 num_pages = num_extent_pages(eb->start, eb->len);
4072 for (i = 0; i < num_pages; i++) {
4073 struct page *p = extent_buffer_page(eb, i);
4074 mark_page_accessed(p);
4075 }
4076}
4077
Chris Masond1310b22008-01-24 16:13:08 -05004078struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
Chris Mason727011e2010-08-06 13:21:20 -04004079 u64 start, unsigned long len)
Chris Masond1310b22008-01-24 16:13:08 -05004080{
4081 unsigned long num_pages = num_extent_pages(start, len);
4082 unsigned long i;
4083 unsigned long index = start >> PAGE_CACHE_SHIFT;
4084 struct extent_buffer *eb;
Chris Mason6af118c2008-07-22 11:18:07 -04004085 struct extent_buffer *exists = NULL;
Chris Masond1310b22008-01-24 16:13:08 -05004086 struct page *p;
4087 struct address_space *mapping = tree->mapping;
4088 int uptodate = 1;
Miao Xie19fe0a82010-10-26 20:57:29 -04004089 int ret;
Chris Masond1310b22008-01-24 16:13:08 -05004090
Miao Xie19fe0a82010-10-26 20:57:29 -04004091 rcu_read_lock();
4092 eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
4093 if (eb && atomic_inc_not_zero(&eb->refs)) {
4094 rcu_read_unlock();
Josef Bacik5df42352012-03-15 18:24:42 -04004095 mark_extent_buffer_accessed(eb);
Chris Mason6af118c2008-07-22 11:18:07 -04004096 return eb;
4097 }
Miao Xie19fe0a82010-10-26 20:57:29 -04004098 rcu_read_unlock();
Chris Mason6af118c2008-07-22 11:18:07 -04004099
David Sterbaba144192011-04-21 01:12:06 +02004100 eb = __alloc_extent_buffer(tree, start, len, GFP_NOFS);
Peter2b114d12008-04-01 11:21:40 -04004101 if (!eb)
Chris Masond1310b22008-01-24 16:13:08 -05004102 return NULL;
4103
Chris Mason727011e2010-08-06 13:21:20 -04004104 for (i = 0; i < num_pages; i++, index++) {
Chris Masona6591712011-07-19 12:04:14 -04004105 p = find_or_create_page(mapping, index, GFP_NOFS);
Chris Masond1310b22008-01-24 16:13:08 -05004106 if (!p) {
4107 WARN_ON(1);
Chris Mason6af118c2008-07-22 11:18:07 -04004108 goto free_eb;
Chris Masond1310b22008-01-24 16:13:08 -05004109 }
Josef Bacik4f2de97a2012-03-07 16:20:05 -05004110
4111 spin_lock(&mapping->private_lock);
4112 if (PagePrivate(p)) {
4113 /*
4114 * We could have already allocated an eb for this page
 4115			 * and attached one so let's see if we can get a ref on
4116 * the existing eb, and if we can we know it's good and
4117 * we can just return that one, else we know we can just
4118 * overwrite page->private.
4119 */
4120 exists = (struct extent_buffer *)p->private;
4121 if (atomic_inc_not_zero(&exists->refs)) {
4122 spin_unlock(&mapping->private_lock);
4123 unlock_page(p);
Josef Bacik17de39a2012-05-04 15:16:06 -04004124 page_cache_release(p);
Josef Bacik5df42352012-03-15 18:24:42 -04004125 mark_extent_buffer_accessed(exists);
Josef Bacik4f2de97a2012-03-07 16:20:05 -05004126 goto free_eb;
4127 }
4128
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004129 /*
Josef Bacik4f2de97a2012-03-07 16:20:05 -05004130 * Do this so attach doesn't complain and we need to
4131 * drop the ref the old guy had.
4132 */
4133 ClearPagePrivate(p);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004134 WARN_ON(PageDirty(p));
Josef Bacik4f2de97a2012-03-07 16:20:05 -05004135 page_cache_release(p);
Chris Masond1310b22008-01-24 16:13:08 -05004136 }
Josef Bacik4f2de97a2012-03-07 16:20:05 -05004137 attach_extent_buffer_page(eb, p);
4138 spin_unlock(&mapping->private_lock);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004139 WARN_ON(PageDirty(p));
Chris Masond1310b22008-01-24 16:13:08 -05004140 mark_page_accessed(p);
Chris Mason727011e2010-08-06 13:21:20 -04004141 eb->pages[i] = p;
Chris Masond1310b22008-01-24 16:13:08 -05004142 if (!PageUptodate(p))
4143 uptodate = 0;
Chris Masoneb14ab82011-02-10 12:35:00 -05004144
4145 /*
4146 * see below about how we avoid a nasty race with release page
4147 * and why we unlock later
4148 */
Chris Masond1310b22008-01-24 16:13:08 -05004149 }
4150 if (uptodate)
Chris Masonb4ce94d2009-02-04 09:25:08 -05004151 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
Josef Bacik115391d2012-03-09 09:51:43 -05004152again:
Miao Xie19fe0a82010-10-26 20:57:29 -04004153 ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
4154 if (ret)
4155 goto free_eb;
4156
Chris Mason6af118c2008-07-22 11:18:07 -04004157 spin_lock(&tree->buffer_lock);
Miao Xie19fe0a82010-10-26 20:57:29 -04004158 ret = radix_tree_insert(&tree->buffer, start >> PAGE_CACHE_SHIFT, eb);
4159 if (ret == -EEXIST) {
4160 exists = radix_tree_lookup(&tree->buffer,
4161 start >> PAGE_CACHE_SHIFT);
Josef Bacik115391d2012-03-09 09:51:43 -05004162 if (!atomic_inc_not_zero(&exists->refs)) {
4163 spin_unlock(&tree->buffer_lock);
4164 radix_tree_preload_end();
Josef Bacik115391d2012-03-09 09:51:43 -05004165 exists = NULL;
4166 goto again;
4167 }
Chris Mason6af118c2008-07-22 11:18:07 -04004168 spin_unlock(&tree->buffer_lock);
Miao Xie19fe0a82010-10-26 20:57:29 -04004169 radix_tree_preload_end();
Josef Bacik5df42352012-03-15 18:24:42 -04004170 mark_extent_buffer_accessed(exists);
Chris Mason6af118c2008-07-22 11:18:07 -04004171 goto free_eb;
4172 }
Chris Mason6af118c2008-07-22 11:18:07 -04004173 /* add one reference for the tree */
Josef Bacik3083ee22012-03-09 16:01:49 -05004174 spin_lock(&eb->refs_lock);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004175 check_buffer_tree_ref(eb);
Josef Bacik3083ee22012-03-09 16:01:49 -05004176 spin_unlock(&eb->refs_lock);
Yan, Zhengf044ba72010-02-04 08:46:56 +00004177 spin_unlock(&tree->buffer_lock);
Miao Xie19fe0a82010-10-26 20:57:29 -04004178 radix_tree_preload_end();
Chris Masoneb14ab82011-02-10 12:35:00 -05004179
4180 /*
4181	 * there is a race where releasepage may have
4182	 * tried to find this extent buffer in the radix
4183	 * tree but failed. It will tell the VM it is safe to
4184	 * reclaim the page, and it will clear the page private bit.
4185	 * We must make sure to set the page private bit properly
4186	 * after the extent buffer is in the radix tree so
4187	 * it doesn't get lost.
4188 */
Chris Mason727011e2010-08-06 13:21:20 -04004189 SetPageChecked(eb->pages[0]);
4190 for (i = 1; i < num_pages; i++) {
4191 p = extent_buffer_page(eb, i);
Chris Mason727011e2010-08-06 13:21:20 -04004192 ClearPageChecked(p);
4193 unlock_page(p);
4194 }
4195 unlock_page(eb->pages[0]);
Chris Masond1310b22008-01-24 16:13:08 -05004196 return eb;
4197
Chris Mason6af118c2008-07-22 11:18:07 -04004198free_eb:
Chris Mason727011e2010-08-06 13:21:20 -04004199 for (i = 0; i < num_pages; i++) {
4200 if (eb->pages[i])
4201 unlock_page(eb->pages[i]);
4202 }
Chris Masoneb14ab82011-02-10 12:35:00 -05004203
Josef Bacik17de39a2012-05-04 15:16:06 -04004204 WARN_ON(!atomic_dec_and_test(&eb->refs));
Miao Xie897ca6e2010-10-26 20:57:29 -04004205 btrfs_release_extent_buffer(eb);
Chris Mason6af118c2008-07-22 11:18:07 -04004206 return exists;
Chris Masond1310b22008-01-24 16:13:08 -05004207}
Chris Masond1310b22008-01-24 16:13:08 -05004208
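/*
 * Look up an extent buffer starting at 'start' in the radix tree and take
 * a reference on it.  Returns NULL if no buffer is cached for that offset.
 */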
4209struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
David Sterbaf09d1f62011-04-21 01:08:01 +02004210 u64 start, unsigned long len)
Chris Masond1310b22008-01-24 16:13:08 -05004211{
Chris Masond1310b22008-01-24 16:13:08 -05004212 struct extent_buffer *eb;
Chris Masond1310b22008-01-24 16:13:08 -05004213
Miao Xie19fe0a82010-10-26 20:57:29 -04004214 rcu_read_lock();
4215 eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
4216 if (eb && atomic_inc_not_zero(&eb->refs)) {
4217 rcu_read_unlock();
Josef Bacik5df42352012-03-15 18:24:42 -04004218 mark_extent_buffer_accessed(eb);
Miao Xie19fe0a82010-10-26 20:57:29 -04004219 return eb;
4220 }
4221 rcu_read_unlock();
Josef Bacik0f9dd462008-09-23 13:14:11 -04004222
Miao Xie19fe0a82010-10-26 20:57:29 -04004223 return NULL;
Chris Masond1310b22008-01-24 16:13:08 -05004224}
Chris Masond1310b22008-01-24 16:13:08 -05004225
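/*
 * RCU callback that does the final free of the extent buffer structure
 * once all RCU readers are finished with it.
 */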
Josef Bacik3083ee22012-03-09 16:01:49 -05004226static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
4227{
4228 struct extent_buffer *eb =
4229 container_of(head, struct extent_buffer, rcu_head);
4230
4231 __free_extent_buffer(eb);
4232}
4233
Josef Bacik3083ee22012-03-09 16:01:49 -05004234/* Expects to have eb->refs_lock already held */
4235static void release_extent_buffer(struct extent_buffer *eb, gfp_t mask)
4236{
4237 WARN_ON(atomic_read(&eb->refs) == 0);
4238 if (atomic_dec_and_test(&eb->refs)) {
4239 struct extent_io_tree *tree = eb->tree;
Josef Bacik3083ee22012-03-09 16:01:49 -05004240
4241 spin_unlock(&eb->refs_lock);
4242
Josef Bacik3083ee22012-03-09 16:01:49 -05004243 spin_lock(&tree->buffer_lock);
4244 radix_tree_delete(&tree->buffer,
4245 eb->start >> PAGE_CACHE_SHIFT);
4246 spin_unlock(&tree->buffer_lock);
4247
4248 /* Should be safe to release our pages at this point */
4249 btrfs_release_extent_buffer_page(eb, 0);
4250
4251 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
4252 return;
4253 }
4254 spin_unlock(&eb->refs_lock);
4255}
4256
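/*
 * Drop a reference on 'eb'.  If the buffer is stale, idle and only held by
 * the tree, the tree's reference is dropped too so the buffer can be freed.
 */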
Chris Masond1310b22008-01-24 16:13:08 -05004257void free_extent_buffer(struct extent_buffer *eb)
4258{
Chris Masond1310b22008-01-24 16:13:08 -05004259 if (!eb)
4260 return;
4261
Josef Bacik3083ee22012-03-09 16:01:49 -05004262 spin_lock(&eb->refs_lock);
4263 if (atomic_read(&eb->refs) == 2 &&
4264 test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004265 !extent_buffer_under_io(eb) &&
Josef Bacik3083ee22012-03-09 16:01:49 -05004266 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4267 atomic_dec(&eb->refs);
Chris Masond1310b22008-01-24 16:13:08 -05004268
Josef Bacik3083ee22012-03-09 16:01:49 -05004269 /*
4270 * I know this is terrible, but it's temporary until we stop tracking
4271 * the uptodate bits and such for the extent buffers.
4272 */
4273 release_extent_buffer(eb, GFP_ATOMIC);
Chris Masond1310b22008-01-24 16:13:08 -05004274}
Chris Masond1310b22008-01-24 16:13:08 -05004275
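/*
 * Like free_extent_buffer(), but also marks the buffer stale so the tree's
 * reference is dropped as soon as nobody else is using it.
 */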
Josef Bacik3083ee22012-03-09 16:01:49 -05004276void free_extent_buffer_stale(struct extent_buffer *eb)
4277{
4278 if (!eb)
Chris Masond1310b22008-01-24 16:13:08 -05004279 return;
4280
Josef Bacik3083ee22012-03-09 16:01:49 -05004281 spin_lock(&eb->refs_lock);
4282 set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
4283
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004284 if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
Josef Bacik3083ee22012-03-09 16:01:49 -05004285 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4286 atomic_dec(&eb->refs);
4287 release_extent_buffer(eb, GFP_NOFS);
Chris Masond1310b22008-01-24 16:13:08 -05004288}
4289
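/*
 * Clear the dirty bit on every page backing 'eb' and drop the pages'
 * dirty tags in the mapping so writeback no longer picks them up.
 */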
Chris Mason1d4284b2012-03-28 20:31:37 -04004290void clear_extent_buffer_dirty(struct extent_buffer *eb)
Chris Masond1310b22008-01-24 16:13:08 -05004291{
Chris Masond1310b22008-01-24 16:13:08 -05004292 unsigned long i;
4293 unsigned long num_pages;
4294 struct page *page;
4295
Chris Masond1310b22008-01-24 16:13:08 -05004296 num_pages = num_extent_pages(eb->start, eb->len);
4297
4298 for (i = 0; i < num_pages; i++) {
4299 page = extent_buffer_page(eb, i);
Chris Masonb9473432009-03-13 11:00:37 -04004300 if (!PageDirty(page))
Chris Masond2c3f4f2008-11-19 12:44:22 -05004301 continue;
4302
Chris Masona61e6f22008-07-22 11:18:08 -04004303 lock_page(page);
Chris Masoneb14ab82011-02-10 12:35:00 -05004304 WARN_ON(!PagePrivate(page));
4305
Chris Masond1310b22008-01-24 16:13:08 -05004306 clear_page_dirty_for_io(page);
Sven Wegener0ee0fda2008-07-30 16:54:26 -04004307 spin_lock_irq(&page->mapping->tree_lock);
Chris Masond1310b22008-01-24 16:13:08 -05004308 if (!PageDirty(page)) {
4309 radix_tree_tag_clear(&page->mapping->page_tree,
4310 page_index(page),
4311 PAGECACHE_TAG_DIRTY);
4312 }
Sven Wegener0ee0fda2008-07-30 16:54:26 -04004313 spin_unlock_irq(&page->mapping->tree_lock);
Chris Masonbf0da8c2011-11-04 12:29:37 -04004314 ClearPageError(page);
Chris Masona61e6f22008-07-22 11:18:08 -04004315 unlock_page(page);
Chris Masond1310b22008-01-24 16:13:08 -05004316 }
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004317 WARN_ON(atomic_read(&eb->refs) == 0);
Chris Masond1310b22008-01-24 16:13:08 -05004318}
Chris Masond1310b22008-01-24 16:13:08 -05004319
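/*
 * Mark every page of 'eb' dirty.  Returns nonzero if the buffer was
 * already dirty before this call.
 */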
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004320int set_extent_buffer_dirty(struct extent_buffer *eb)
Chris Masond1310b22008-01-24 16:13:08 -05004321{
4322 unsigned long i;
4323 unsigned long num_pages;
Chris Masonb9473432009-03-13 11:00:37 -04004324 int was_dirty = 0;
Chris Masond1310b22008-01-24 16:13:08 -05004325
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004326 check_buffer_tree_ref(eb);
4327
Chris Masonb9473432009-03-13 11:00:37 -04004328 was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004329
Chris Masond1310b22008-01-24 16:13:08 -05004330 num_pages = num_extent_pages(eb->start, eb->len);
Josef Bacik3083ee22012-03-09 16:01:49 -05004331 WARN_ON(atomic_read(&eb->refs) == 0);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004332 WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
4333
Chris Masonb9473432009-03-13 11:00:37 -04004334 for (i = 0; i < num_pages; i++)
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004335 set_page_dirty(extent_buffer_page(eb, i));
Chris Masonb9473432009-03-13 11:00:37 -04004336 return was_dirty;
Chris Masond1310b22008-01-24 16:13:08 -05004337}
Chris Masond1310b22008-01-24 16:13:08 -05004338
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004339static int range_straddles_pages(u64 start, u64 len)
Chris Mason19b6caf2011-07-25 06:50:50 -04004340{
4341 if (len < PAGE_CACHE_SIZE)
4342 return 1;
4343 if (start & (PAGE_CACHE_SIZE - 1))
4344 return 1;
4345 if ((start + len) & (PAGE_CACHE_SIZE - 1))
4346 return 1;
4347 return 0;
4348}
4349
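/*
 * Clear the uptodate flag on 'eb' and on each of its pages.
 */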
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004350int clear_extent_buffer_uptodate(struct extent_buffer *eb)
Chris Mason1259ab72008-05-12 13:39:03 -04004351{
4352 unsigned long i;
4353 struct page *page;
4354 unsigned long num_pages;
4355
Chris Masonb4ce94d2009-02-04 09:25:08 -05004356 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004357 num_pages = num_extent_pages(eb->start, eb->len);
Chris Mason1259ab72008-05-12 13:39:03 -04004358 for (i = 0; i < num_pages; i++) {
4359 page = extent_buffer_page(eb, i);
Chris Mason33958dc2008-07-30 10:29:12 -04004360 if (page)
4361 ClearPageUptodate(page);
Chris Mason1259ab72008-05-12 13:39:03 -04004362 }
4363 return 0;
4364}
4365
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004366int set_extent_buffer_uptodate(struct extent_buffer *eb)
Chris Masond1310b22008-01-24 16:13:08 -05004367{
4368 unsigned long i;
4369 struct page *page;
4370 unsigned long num_pages;
4371
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004372 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
Chris Masond1310b22008-01-24 16:13:08 -05004373 num_pages = num_extent_pages(eb->start, eb->len);
Chris Masond1310b22008-01-24 16:13:08 -05004374 for (i = 0; i < num_pages; i++) {
4375 page = extent_buffer_page(eb, i);
Chris Masond1310b22008-01-24 16:13:08 -05004376 SetPageUptodate(page);
4377 }
4378 return 0;
4379}
Chris Masond1310b22008-01-24 16:13:08 -05004380
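/*
 * Return 1 if the byte range [start, end] is fully uptodate, either via
 * the EXTENT_UPTODATE bit in the io tree or by checking every page.
 */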
Chris Masonce9adaa2008-04-09 16:28:12 -04004381int extent_range_uptodate(struct extent_io_tree *tree,
4382 u64 start, u64 end)
4383{
4384 struct page *page;
4385 int ret;
4386 int pg_uptodate = 1;
4387 int uptodate;
4388 unsigned long index;
4389
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004390 if (range_straddles_pages(start, end - start + 1)) {
Chris Mason19b6caf2011-07-25 06:50:50 -04004391 ret = test_range_bit(tree, start, end,
4392 EXTENT_UPTODATE, 1, NULL);
4393 if (ret)
4394 return 1;
4395 }
Chris Masond3977122009-01-05 21:25:51 -05004396 while (start <= end) {
Chris Masonce9adaa2008-04-09 16:28:12 -04004397 index = start >> PAGE_CACHE_SHIFT;
4398 page = find_get_page(tree->mapping, index);
Mitch Harder8bedd512012-01-26 15:01:11 -05004399 if (!page)
4400 return 1;
Chris Masonce9adaa2008-04-09 16:28:12 -04004401 uptodate = PageUptodate(page);
4402 page_cache_release(page);
4403 if (!uptodate) {
4404 pg_uptodate = 0;
4405 break;
4406 }
4407 start += PAGE_CACHE_SIZE;
4408 }
4409 return pg_uptodate;
4410}
4411
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004412int extent_buffer_uptodate(struct extent_buffer *eb)
Chris Masond1310b22008-01-24 16:13:08 -05004413{
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004414 return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
Chris Masond1310b22008-01-24 16:13:08 -05004415}
Chris Masond1310b22008-01-24 16:13:08 -05004416
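/*
 * Read in any pages of 'eb' that are not yet uptodate, starting at byte
 * offset 'start' (0 means the whole buffer).  With WAIT_NONE the reads are
 * only started; with WAIT_COMPLETE we wait for them and return -EIO if a
 * page failed to become uptodate.
 */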
4417int read_extent_buffer_pages(struct extent_io_tree *tree,
Arne Jansenbb82ab82011-06-10 14:06:53 +02004418 struct extent_buffer *eb, u64 start, int wait,
Chris Masonf1885912008-04-09 16:28:12 -04004419 get_extent_t *get_extent, int mirror_num)
Chris Masond1310b22008-01-24 16:13:08 -05004420{
4421 unsigned long i;
4422 unsigned long start_i;
4423 struct page *page;
4424 int err;
4425 int ret = 0;
Chris Masonce9adaa2008-04-09 16:28:12 -04004426 int locked_pages = 0;
4427 int all_uptodate = 1;
Chris Masond1310b22008-01-24 16:13:08 -05004428 unsigned long num_pages;
Chris Mason727011e2010-08-06 13:21:20 -04004429 unsigned long num_reads = 0;
Chris Masona86c12c2008-02-07 10:50:54 -05004430 struct bio *bio = NULL;
Chris Masonc8b97812008-10-29 14:49:59 -04004431 unsigned long bio_flags = 0;
Chris Masona86c12c2008-02-07 10:50:54 -05004432
Chris Masonb4ce94d2009-02-04 09:25:08 -05004433 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
Chris Masond1310b22008-01-24 16:13:08 -05004434 return 0;
4435
Chris Masond1310b22008-01-24 16:13:08 -05004436 if (start) {
4437 WARN_ON(start < eb->start);
4438 start_i = (start >> PAGE_CACHE_SHIFT) -
4439 (eb->start >> PAGE_CACHE_SHIFT);
4440 } else {
4441 start_i = 0;
4442 }
4443
4444 num_pages = num_extent_pages(eb->start, eb->len);
4445 for (i = start_i; i < num_pages; i++) {
4446 page = extent_buffer_page(eb, i);
Arne Jansenbb82ab82011-06-10 14:06:53 +02004447 if (wait == WAIT_NONE) {
David Woodhouse2db04962008-08-07 11:19:43 -04004448 if (!trylock_page(page))
Chris Masonce9adaa2008-04-09 16:28:12 -04004449 goto unlock_exit;
Chris Masond1310b22008-01-24 16:13:08 -05004450 } else {
4451 lock_page(page);
4452 }
Chris Masonce9adaa2008-04-09 16:28:12 -04004453 locked_pages++;
Chris Mason727011e2010-08-06 13:21:20 -04004454 if (!PageUptodate(page)) {
4455 num_reads++;
Chris Masonce9adaa2008-04-09 16:28:12 -04004456 all_uptodate = 0;
Chris Mason727011e2010-08-06 13:21:20 -04004457 }
Chris Masonce9adaa2008-04-09 16:28:12 -04004458 }
4459 if (all_uptodate) {
4460 if (start_i == 0)
Chris Masonb4ce94d2009-02-04 09:25:08 -05004461 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
Chris Masonce9adaa2008-04-09 16:28:12 -04004462 goto unlock_exit;
4463 }
4464
Josef Bacikea466792012-03-26 21:57:36 -04004465 clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
Josef Bacik5cf1ab52012-04-16 09:42:26 -04004466 eb->read_mirror = 0;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004467 atomic_set(&eb->io_pages, num_reads);
Chris Masonce9adaa2008-04-09 16:28:12 -04004468 for (i = start_i; i < num_pages; i++) {
4469 page = extent_buffer_page(eb, i);
Chris Masonce9adaa2008-04-09 16:28:12 -04004470 if (!PageUptodate(page)) {
Chris Masonf1885912008-04-09 16:28:12 -04004471 ClearPageError(page);
Chris Masona86c12c2008-02-07 10:50:54 -05004472 err = __extent_read_full_page(tree, page,
Chris Masonf1885912008-04-09 16:28:12 -04004473 get_extent, &bio,
Chris Masonc8b97812008-10-29 14:49:59 -04004474 mirror_num, &bio_flags);
Chris Masond3977122009-01-05 21:25:51 -05004475 if (err)
Chris Masond1310b22008-01-24 16:13:08 -05004476 ret = err;
Chris Masond1310b22008-01-24 16:13:08 -05004477 } else {
4478 unlock_page(page);
4479 }
4480 }
4481
Jeff Mahoney355808c2011-10-03 23:23:14 -04004482 if (bio) {
4483 err = submit_one_bio(READ, bio, mirror_num, bio_flags);
Jeff Mahoney79787ea2012-03-12 16:03:00 +01004484 if (err)
4485 return err;
Jeff Mahoney355808c2011-10-03 23:23:14 -04004486 }
Chris Masona86c12c2008-02-07 10:50:54 -05004487
Arne Jansenbb82ab82011-06-10 14:06:53 +02004488 if (ret || wait != WAIT_COMPLETE)
Chris Masond1310b22008-01-24 16:13:08 -05004489 return ret;
Chris Masond3977122009-01-05 21:25:51 -05004490
Chris Masond1310b22008-01-24 16:13:08 -05004491 for (i = start_i; i < num_pages; i++) {
4492 page = extent_buffer_page(eb, i);
4493 wait_on_page_locked(page);
Chris Masond3977122009-01-05 21:25:51 -05004494 if (!PageUptodate(page))
Chris Masond1310b22008-01-24 16:13:08 -05004495 ret = -EIO;
Chris Masond1310b22008-01-24 16:13:08 -05004496 }
Chris Masond3977122009-01-05 21:25:51 -05004497
Chris Masond1310b22008-01-24 16:13:08 -05004498 return ret;
Chris Masonce9adaa2008-04-09 16:28:12 -04004499
4500unlock_exit:
4501 i = start_i;
Chris Masond3977122009-01-05 21:25:51 -05004502 while (locked_pages > 0) {
Chris Masonce9adaa2008-04-09 16:28:12 -04004503 page = extent_buffer_page(eb, i);
4504 i++;
4505 unlock_page(page);
4506 locked_pages--;
4507 }
4508 return ret;
Chris Masond1310b22008-01-24 16:13:08 -05004509}
Chris Masond1310b22008-01-24 16:13:08 -05004510
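/*
 * Copy 'len' bytes starting at offset 'start' inside 'eb' into 'dstv'.
 */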
4511void read_extent_buffer(struct extent_buffer *eb, void *dstv,
4512 unsigned long start,
4513 unsigned long len)
4514{
4515 size_t cur;
4516 size_t offset;
4517 struct page *page;
4518 char *kaddr;
4519 char *dst = (char *)dstv;
4520 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4521 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
Chris Masond1310b22008-01-24 16:13:08 -05004522
4523 WARN_ON(start > eb->len);
4524 WARN_ON(start + len > eb->start + eb->len);
4525
4526 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
4527
Chris Masond3977122009-01-05 21:25:51 -05004528 while (len > 0) {
Chris Masond1310b22008-01-24 16:13:08 -05004529 page = extent_buffer_page(eb, i);
Chris Masond1310b22008-01-24 16:13:08 -05004530
4531 cur = min(len, (PAGE_CACHE_SIZE - offset));
Chris Masona6591712011-07-19 12:04:14 -04004532 kaddr = page_address(page);
Chris Masond1310b22008-01-24 16:13:08 -05004533 memcpy(dst, kaddr + offset, cur);
Chris Masond1310b22008-01-24 16:13:08 -05004534
4535 dst += cur;
4536 len -= cur;
4537 offset = 0;
4538 i++;
4539 }
4540}
Chris Masond1310b22008-01-24 16:13:08 -05004541
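/*
 * Map the part of 'eb' containing [start, start + min_len) without crossing
 * a page boundary.  On success *map points at the mapped area, which begins
 * at buffer offset *map_start and is *map_len bytes long; returns -EINVAL
 * if the requested range cannot be mapped from a single page.
 */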
4542int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
Chris Masona6591712011-07-19 12:04:14 -04004543 unsigned long min_len, char **map,
Chris Masond1310b22008-01-24 16:13:08 -05004544 unsigned long *map_start,
Chris Masona6591712011-07-19 12:04:14 -04004545 unsigned long *map_len)
Chris Masond1310b22008-01-24 16:13:08 -05004546{
4547 size_t offset = start & (PAGE_CACHE_SIZE - 1);
4548 char *kaddr;
4549 struct page *p;
4550 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4551 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4552 unsigned long end_i = (start_offset + start + min_len - 1) >>
4553 PAGE_CACHE_SHIFT;
4554
4555 if (i != end_i)
4556 return -EINVAL;
4557
4558 if (i == 0) {
4559 offset = start_offset;
4560 *map_start = 0;
4561 } else {
4562 offset = 0;
4563 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
4564 }
Chris Masond3977122009-01-05 21:25:51 -05004565
Chris Masond1310b22008-01-24 16:13:08 -05004566 if (start + min_len > eb->len) {
Chris Masond3977122009-01-05 21:25:51 -05004567 printk(KERN_ERR "btrfs bad mapping eb start %llu len %lu, "
4568 "wanted %lu %lu\n", (unsigned long long)eb->start,
4569 eb->len, start, min_len);
Chris Masond1310b22008-01-24 16:13:08 -05004570 WARN_ON(1);
Josef Bacik850265332011-03-15 14:52:12 -04004571 return -EINVAL;
Chris Masond1310b22008-01-24 16:13:08 -05004572 }
4573
4574 p = extent_buffer_page(eb, i);
Chris Masona6591712011-07-19 12:04:14 -04004575 kaddr = page_address(p);
Chris Masond1310b22008-01-24 16:13:08 -05004576 *map = kaddr + offset;
4577 *map_len = PAGE_CACHE_SIZE - offset;
4578 return 0;
4579}
Chris Masond1310b22008-01-24 16:13:08 -05004580
Chris Masond1310b22008-01-24 16:13:08 -05004581int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
4582 unsigned long start,
4583 unsigned long len)
4584{
4585 size_t cur;
4586 size_t offset;
4587 struct page *page;
4588 char *kaddr;
4589 char *ptr = (char *)ptrv;
4590 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4591 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4592 int ret = 0;
4593
4594 WARN_ON(start > eb->len);
4595 WARN_ON(start + len > eb->start + eb->len);
4596
4597 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
4598
Chris Masond3977122009-01-05 21:25:51 -05004599 while (len > 0) {
Chris Masond1310b22008-01-24 16:13:08 -05004600 page = extent_buffer_page(eb, i);
Chris Masond1310b22008-01-24 16:13:08 -05004601
4602 cur = min(len, (PAGE_CACHE_SIZE - offset));
4603
Chris Masona6591712011-07-19 12:04:14 -04004604 kaddr = page_address(page);
Chris Masond1310b22008-01-24 16:13:08 -05004605 ret = memcmp(ptr, kaddr + offset, cur);
Chris Masond1310b22008-01-24 16:13:08 -05004606 if (ret)
4607 break;
4608
4609 ptr += cur;
4610 len -= cur;
4611 offset = 0;
4612 i++;
4613 }
4614 return ret;
4615}
Chris Masond1310b22008-01-24 16:13:08 -05004616
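/*
 * Copy 'len' bytes from 'srcv' into 'eb' starting at offset 'start'.
 */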
4617void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
4618 unsigned long start, unsigned long len)
4619{
4620 size_t cur;
4621 size_t offset;
4622 struct page *page;
4623 char *kaddr;
4624 char *src = (char *)srcv;
4625 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4626 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4627
4628 WARN_ON(start > eb->len);
4629 WARN_ON(start + len > eb->start + eb->len);
4630
4631 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
4632
Chris Masond3977122009-01-05 21:25:51 -05004633 while (len > 0) {
Chris Masond1310b22008-01-24 16:13:08 -05004634 page = extent_buffer_page(eb, i);
4635 WARN_ON(!PageUptodate(page));
4636
4637 cur = min(len, PAGE_CACHE_SIZE - offset);
Chris Masona6591712011-07-19 12:04:14 -04004638 kaddr = page_address(page);
Chris Masond1310b22008-01-24 16:13:08 -05004639 memcpy(kaddr + offset, src, cur);
Chris Masond1310b22008-01-24 16:13:08 -05004640
4641 src += cur;
4642 len -= cur;
4643 offset = 0;
4644 i++;
4645 }
4646}
Chris Masond1310b22008-01-24 16:13:08 -05004647
4648void memset_extent_buffer(struct extent_buffer *eb, char c,
4649 unsigned long start, unsigned long len)
4650{
4651 size_t cur;
4652 size_t offset;
4653 struct page *page;
4654 char *kaddr;
4655 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4656 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4657
4658 WARN_ON(start > eb->len);
4659 WARN_ON(start + len > eb->start + eb->len);
4660
4661 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
4662
Chris Masond3977122009-01-05 21:25:51 -05004663 while (len > 0) {
Chris Masond1310b22008-01-24 16:13:08 -05004664 page = extent_buffer_page(eb, i);
4665 WARN_ON(!PageUptodate(page));
4666
4667 cur = min(len, PAGE_CACHE_SIZE - offset);
Chris Masona6591712011-07-19 12:04:14 -04004668 kaddr = page_address(page);
Chris Masond1310b22008-01-24 16:13:08 -05004669 memset(kaddr + offset, c, cur);
Chris Masond1310b22008-01-24 16:13:08 -05004670
4671 len -= cur;
4672 offset = 0;
4673 i++;
4674 }
4675}
Chris Masond1310b22008-01-24 16:13:08 -05004676
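/*
 * Copy 'len' bytes from 'src' at 'src_offset' into 'dst' at 'dst_offset'.
 */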
4677void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
4678 unsigned long dst_offset, unsigned long src_offset,
4679 unsigned long len)
4680{
4681 u64 dst_len = dst->len;
4682 size_t cur;
4683 size_t offset;
4684 struct page *page;
4685 char *kaddr;
4686 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
4687 unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
4688
4689 WARN_ON(src->len != dst_len);
4690
4691 offset = (start_offset + dst_offset) &
4692 ((unsigned long)PAGE_CACHE_SIZE - 1);
4693
Chris Masond3977122009-01-05 21:25:51 -05004694 while (len > 0) {
Chris Masond1310b22008-01-24 16:13:08 -05004695 page = extent_buffer_page(dst, i);
4696 WARN_ON(!PageUptodate(page));
4697
4698 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
4699
Chris Masona6591712011-07-19 12:04:14 -04004700 kaddr = page_address(page);
Chris Masond1310b22008-01-24 16:13:08 -05004701 read_extent_buffer(src, kaddr + offset, src_offset, cur);
Chris Masond1310b22008-01-24 16:13:08 -05004702
4703 src_offset += cur;
4704 len -= cur;
4705 offset = 0;
4706 i++;
4707 }
4708}
Chris Masond1310b22008-01-24 16:13:08 -05004709
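/*
 * Helper for memmove_extent_buffer(): move 'len' bytes between two page
 * mappings, using memmove() when both offsets live in the same page and a
 * backwards byte-by-byte copy otherwise.
 */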
4710static void move_pages(struct page *dst_page, struct page *src_page,
4711 unsigned long dst_off, unsigned long src_off,
4712 unsigned long len)
4713{
Chris Masona6591712011-07-19 12:04:14 -04004714 char *dst_kaddr = page_address(dst_page);
Chris Masond1310b22008-01-24 16:13:08 -05004715 if (dst_page == src_page) {
4716 memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
4717 } else {
Chris Masona6591712011-07-19 12:04:14 -04004718 char *src_kaddr = page_address(src_page);
Chris Masond1310b22008-01-24 16:13:08 -05004719 char *p = dst_kaddr + dst_off + len;
4720 char *s = src_kaddr + src_off + len;
4721
4722 while (len--)
4723 *--p = *--s;
Chris Masond1310b22008-01-24 16:13:08 -05004724 }
Chris Masond1310b22008-01-24 16:13:08 -05004725}
4726
Sergei Trofimovich33872062011-04-11 21:52:52 +00004727static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
4728{
4729 unsigned long distance = (src > dst) ? src - dst : dst - src;
4730 return distance < len;
4731}
4732
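/*
 * Copy 'len' bytes between two page mappings, falling back to memmove()
 * when source and destination overlap within the same page.
 */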
Chris Masond1310b22008-01-24 16:13:08 -05004733static void copy_pages(struct page *dst_page, struct page *src_page,
4734 unsigned long dst_off, unsigned long src_off,
4735 unsigned long len)
4736{
Chris Masona6591712011-07-19 12:04:14 -04004737 char *dst_kaddr = page_address(dst_page);
Chris Masond1310b22008-01-24 16:13:08 -05004738 char *src_kaddr;
Chris Mason727011e2010-08-06 13:21:20 -04004739 int must_memmove = 0;
Chris Masond1310b22008-01-24 16:13:08 -05004740
Sergei Trofimovich33872062011-04-11 21:52:52 +00004741 if (dst_page != src_page) {
Chris Masona6591712011-07-19 12:04:14 -04004742 src_kaddr = page_address(src_page);
Sergei Trofimovich33872062011-04-11 21:52:52 +00004743 } else {
Chris Masond1310b22008-01-24 16:13:08 -05004744 src_kaddr = dst_kaddr;
Chris Mason727011e2010-08-06 13:21:20 -04004745 if (areas_overlap(src_off, dst_off, len))
4746 must_memmove = 1;
Sergei Trofimovich33872062011-04-11 21:52:52 +00004747 }
Chris Masond1310b22008-01-24 16:13:08 -05004748
Chris Mason727011e2010-08-06 13:21:20 -04004749 if (must_memmove)
4750 memmove(dst_kaddr + dst_off, src_kaddr + src_off, len);
4751 else
4752 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
Chris Masond1310b22008-01-24 16:13:08 -05004753}
4754
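/*
 * Copy 'len' bytes inside 'dst' from 'src_offset' to 'dst_offset', working
 * through the buffer one page at a time.
 */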
4755void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
4756 unsigned long src_offset, unsigned long len)
4757{
4758 size_t cur;
4759 size_t dst_off_in_page;
4760 size_t src_off_in_page;
4761 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
4762 unsigned long dst_i;
4763 unsigned long src_i;
4764
4765 if (src_offset + len > dst->len) {
Chris Masond3977122009-01-05 21:25:51 -05004766 printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
4767 "len %lu dst len %lu\n", src_offset, len, dst->len);
Chris Masond1310b22008-01-24 16:13:08 -05004768 BUG_ON(1);
4769 }
4770 if (dst_offset + len > dst->len) {
Chris Masond3977122009-01-05 21:25:51 -05004771 printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
4772 "len %lu dst len %lu\n", dst_offset, len, dst->len);
Chris Masond1310b22008-01-24 16:13:08 -05004773 BUG_ON(1);
4774 }
4775
Chris Masond3977122009-01-05 21:25:51 -05004776 while (len > 0) {
Chris Masond1310b22008-01-24 16:13:08 -05004777 dst_off_in_page = (start_offset + dst_offset) &
4778 ((unsigned long)PAGE_CACHE_SIZE - 1);
4779 src_off_in_page = (start_offset + src_offset) &
4780 ((unsigned long)PAGE_CACHE_SIZE - 1);
4781
4782 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
4783 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
4784
4785 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
4786 src_off_in_page));
4787 cur = min_t(unsigned long, cur,
4788 (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
4789
4790 copy_pages(extent_buffer_page(dst, dst_i),
4791 extent_buffer_page(dst, src_i),
4792 dst_off_in_page, src_off_in_page, cur);
4793
4794 src_offset += cur;
4795 dst_offset += cur;
4796 len -= cur;
4797 }
4798}
Chris Masond1310b22008-01-24 16:13:08 -05004799
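/*
 * memmove() equivalent for ranges inside one extent buffer: overlapping
 * ranges are handled by copying backwards whenever the destination sits
 * above the source.
 */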
4800void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
4801 unsigned long src_offset, unsigned long len)
4802{
4803 size_t cur;
4804 size_t dst_off_in_page;
4805 size_t src_off_in_page;
4806 unsigned long dst_end = dst_offset + len - 1;
4807 unsigned long src_end = src_offset + len - 1;
4808 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
4809 unsigned long dst_i;
4810 unsigned long src_i;
4811
4812 if (src_offset + len > dst->len) {
Chris Masond3977122009-01-05 21:25:51 -05004813 printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
4814 "len %lu len %lu\n", src_offset, len, dst->len);
Chris Masond1310b22008-01-24 16:13:08 -05004815 BUG_ON(1);
4816 }
4817 if (dst_offset + len > dst->len) {
Chris Masond3977122009-01-05 21:25:51 -05004818 printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
4819 "len %lu len %lu\n", dst_offset, len, dst->len);
Chris Masond1310b22008-01-24 16:13:08 -05004820 BUG_ON(1);
4821 }
Chris Mason727011e2010-08-06 13:21:20 -04004822 if (dst_offset < src_offset) {
Chris Masond1310b22008-01-24 16:13:08 -05004823 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
4824 return;
4825 }
Chris Masond3977122009-01-05 21:25:51 -05004826 while (len > 0) {
Chris Masond1310b22008-01-24 16:13:08 -05004827 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
4828 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
4829
4830 dst_off_in_page = (start_offset + dst_end) &
4831 ((unsigned long)PAGE_CACHE_SIZE - 1);
4832 src_off_in_page = (start_offset + src_end) &
4833 ((unsigned long)PAGE_CACHE_SIZE - 1);
4834
4835 cur = min_t(unsigned long, len, src_off_in_page + 1);
4836 cur = min(cur, dst_off_in_page + 1);
4837 move_pages(extent_buffer_page(dst, dst_i),
4838 extent_buffer_page(dst, src_i),
4839 dst_off_in_page - cur + 1,
4840 src_off_in_page - cur + 1, cur);
4841
4842 dst_end -= cur;
4843 src_end -= cur;
4844 len -= cur;
4845 }
4846}
Chris Mason6af118c2008-07-22 11:18:07 -04004847
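/*
 * Try to detach and free the extent buffer attached to 'page': if the
 * buffer is idle and only referenced by the tree, drop that reference so
 * the buffer and its pages can be freed.  Returns 1 if the page may be
 * released.
 */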
Josef Bacik3083ee22012-03-09 16:01:49 -05004848int try_release_extent_buffer(struct page *page, gfp_t mask)
Miao Xie19fe0a82010-10-26 20:57:29 -04004849{
Chris Mason6af118c2008-07-22 11:18:07 -04004850 struct extent_buffer *eb;
Miao Xie897ca6e2010-10-26 20:57:29 -04004851
Miao Xie19fe0a82010-10-26 20:57:29 -04004852 /*
Josef Bacik3083ee22012-03-09 16:01:49 -05004853	 * We need to make sure nobody is attaching this page to an eb right
4854 * now.
Miao Xie19fe0a82010-10-26 20:57:29 -04004855 */
Josef Bacik3083ee22012-03-09 16:01:49 -05004856 spin_lock(&page->mapping->private_lock);
4857 if (!PagePrivate(page)) {
4858 spin_unlock(&page->mapping->private_lock);
4859 return 1;
Miao Xie19fe0a82010-10-26 20:57:29 -04004860 }
4861
Josef Bacik3083ee22012-03-09 16:01:49 -05004862 eb = (struct extent_buffer *)page->private;
4863 BUG_ON(!eb);
Miao Xie19fe0a82010-10-26 20:57:29 -04004864
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004865 /*
Josef Bacik3083ee22012-03-09 16:01:49 -05004866	 * This is a little awful but should be ok: we need to make sure that
4867 * the eb doesn't disappear out from under us while we're looking at
4868 * this page.
4869 */
4870 spin_lock(&eb->refs_lock);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004871 if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
Josef Bacik3083ee22012-03-09 16:01:49 -05004872 spin_unlock(&eb->refs_lock);
4873 spin_unlock(&page->mapping->private_lock);
4874 return 0;
4875 }
4876 spin_unlock(&page->mapping->private_lock);
4877
4878 if ((mask & GFP_NOFS) == GFP_NOFS)
4879 mask = GFP_NOFS;
4880
4881 /*
4882 * If tree ref isn't set then we know the ref on this eb is a real ref,
4883	 * so just return; this page will likely be freed soon anyway.
4884 */
4885 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
4886 spin_unlock(&eb->refs_lock);
4887 return 0;
4888 }
4889 release_extent_buffer(eb, mask);
4890
4891 return 1;
Chris Mason6af118c2008-07-22 11:18:07 -04004892}