#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/prefetch.h>
#include <linux/cleancache.h>
#include "extent_io.h"
#include "extent_map.h"
#include "compat.h"
#include "ctree.h"
#include "btrfs_inode.h"

static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;

static LIST_HEAD(buffers);
static LIST_HEAD(states);

#define LEAK_DEBUG 0
#if LEAK_DEBUG
static DEFINE_SPINLOCK(leak_lock);
#endif
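
/*
 * When LEAK_DEBUG is enabled, every extent_state and extent_buffer that
 * is allocated gets linked onto the global 'states' and 'buffers' lists
 * above; extent_io_exit() walks both lists and prints anything that was
 * never freed.
 */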

#define BUFFER_LRU_MAX 64

struct tree_entry {
	u64 start;
	u64 end;
	struct rb_node rb_node;
};

struct extent_page_data {
	struct bio *bio;
	struct extent_io_tree *tree;
	get_extent_t *get_extent;

	/* tells writepage not to lock the state bits for this range
	 * it still does the unlocking
	 */
	unsigned int extent_locked:1;

	/* tells the submit_bio code to use a WRITE_SYNC */
	unsigned int sync_io:1;
};

int __init extent_io_init(void)
{
	extent_state_cache = kmem_cache_create("extent_state",
			sizeof(struct extent_state), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!extent_state_cache)
		return -ENOMEM;

	extent_buffer_cache = kmem_cache_create("extent_buffers",
			sizeof(struct extent_buffer), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!extent_buffer_cache)
		goto free_state_cache;
	return 0;

free_state_cache:
	kmem_cache_destroy(extent_state_cache);
	return -ENOMEM;
}

void extent_io_exit(void)
{
	struct extent_state *state;
	struct extent_buffer *eb;

	while (!list_empty(&states)) {
		state = list_entry(states.next, struct extent_state, leak_list);
		printk(KERN_ERR "btrfs state leak: start %llu end %llu "
		       "state %lu in tree %p refs %d\n",
		       (unsigned long long)state->start,
		       (unsigned long long)state->end,
		       state->state, state->tree, atomic_read(&state->refs));
		list_del(&state->leak_list);
		kmem_cache_free(extent_state_cache, state);
	}

	while (!list_empty(&buffers)) {
		eb = list_entry(buffers.next, struct extent_buffer, leak_list);
		printk(KERN_ERR "btrfs buffer leak start %llu len %lu "
		       "refs %d\n", (unsigned long long)eb->start,
		       eb->len, atomic_read(&eb->refs));
		list_del(&eb->leak_list);
		kmem_cache_free(extent_buffer_cache, eb);
	}
	if (extent_state_cache)
		kmem_cache_destroy(extent_state_cache);
	if (extent_buffer_cache)
		kmem_cache_destroy(extent_buffer_cache);
}

void extent_io_tree_init(struct extent_io_tree *tree,
			 struct address_space *mapping)
{
	tree->state = RB_ROOT;
	INIT_RADIX_TREE(&tree->buffer, GFP_ATOMIC);
	tree->ops = NULL;
	tree->dirty_bytes = 0;
	spin_lock_init(&tree->lock);
	spin_lock_init(&tree->buffer_lock);
	tree->mapping = mapping;
}
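
/*
 * Illustrative usage sketch (not part of this file): an io tree is
 * normally embedded in a per-inode structure and initialized against
 * that inode's mapping, e.g.
 *
 *	extent_io_tree_init(&BTRFS_I(inode)->io_tree, inode->i_mapping);
 *
 * after which ranges in the file can be locked, dirtied and waited on
 * through the helpers below.
 */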

static struct extent_state *alloc_extent_state(gfp_t mask)
{
	struct extent_state *state;
#if LEAK_DEBUG
	unsigned long flags;
#endif

	state = kmem_cache_alloc(extent_state_cache, mask);
	if (!state)
		return state;
	state->state = 0;
	state->private = 0;
	state->tree = NULL;
#if LEAK_DEBUG
	spin_lock_irqsave(&leak_lock, flags);
	list_add(&state->leak_list, &states);
	spin_unlock_irqrestore(&leak_lock, flags);
#endif
	atomic_set(&state->refs, 1);
	init_waitqueue_head(&state->wq);
	return state;
}

void free_extent_state(struct extent_state *state)
{
	if (!state)
		return;
	if (atomic_dec_and_test(&state->refs)) {
#if LEAK_DEBUG
		unsigned long flags;
#endif
		WARN_ON(state->tree);
#if LEAK_DEBUG
		spin_lock_irqsave(&leak_lock, flags);
		list_del(&state->leak_list);
		spin_unlock_irqrestore(&leak_lock, flags);
#endif
		kmem_cache_free(extent_state_cache, state);
	}
}

static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset > entry->end)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	entry = rb_entry(node, struct tree_entry, rb_node);
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
				      struct rb_node **prev_ret,
				      struct rb_node **next_ret)
{
	struct rb_root *root = &tree->state;
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct tree_entry *entry;
	struct tree_entry *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct tree_entry, rb_node);
		prev = n;
		prev_entry = entry;

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset > entry->end)
			n = n->rb_right;
		else
			return n;
	}

	if (prev_ret) {
		orig_prev = prev;
		while (prev && offset > prev_entry->end) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

	if (next_ret) {
		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*next_ret = prev;
	}
	return NULL;
}
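
/*
 * Like __etree_search, but when no entry contains 'offset' this returns
 * the first entry that ends at or after it rather than NULL, so callers
 * get either an exact match or the place where scanning should start.
 */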
static inline struct rb_node *tree_search(struct extent_io_tree *tree,
					  u64 offset)
{
	struct rb_node *prev = NULL;
	struct rb_node *ret;

	ret = __etree_search(tree, offset, &prev, NULL);
	if (!ret)
		return prev;
	return ret;
}

static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
		     struct extent_state *other)
{
	if (tree->ops && tree->ops->merge_extent_hook)
		tree->ops->merge_extent_hook(tree->mapping->host, new,
					     other);
}

/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree.  Extents with EXTENT_IO in their state field
 * are not merged because the end_io handlers need to be able to do
 * operations on them without sleeping (or doing allocations/splits).
 *
 * This should be called with the tree lock held.
 */
static int merge_state(struct extent_io_tree *tree,
		       struct extent_state *state)
{
	struct extent_state *other;
	struct rb_node *other_node;

	if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
		return 0;

	other_node = rb_prev(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->end == state->start - 1 &&
		    other->state == state->state) {
			merge_cb(tree, state, other);
			state->start = other->start;
			other->tree = NULL;
			rb_erase(&other->rb_node, &tree->state);
			free_extent_state(other);
		}
	}
	other_node = rb_next(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->start == state->end + 1 &&
		    other->state == state->state) {
			merge_cb(tree, state, other);
			state->end = other->end;
			other->tree = NULL;
			rb_erase(&other->rb_node, &tree->state);
			free_extent_state(other);
		}
	}

	return 0;
}

static int set_state_cb(struct extent_io_tree *tree,
			struct extent_state *state, int *bits)
{
	if (tree->ops && tree->ops->set_bit_hook) {
		return tree->ops->set_bit_hook(tree->mapping->host,
					       state, bits);
	}

	return 0;
}

static void clear_state_cb(struct extent_io_tree *tree,
			   struct extent_state *state, int *bits)
{
	if (tree->ops && tree->ops->clear_bit_hook)
		tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
}

/*
 * insert an extent_state struct into the tree.  'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case the
 * state struct is freed.
 *
 * The tree lock is not taken internally.  This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_io_tree *tree,
			struct extent_state *state, u64 start, u64 end,
			int *bits)
{
	struct rb_node *node;
	int bits_to_set = *bits & ~EXTENT_CTLBITS;
	int ret;

	if (end < start) {
		printk(KERN_ERR "btrfs end < start %llu %llu\n",
		       (unsigned long long)end,
		       (unsigned long long)start);
		WARN_ON(1);
	}
	state->start = start;
	state->end = end;
	ret = set_state_cb(tree, state, bits);
	if (ret)
		return ret;

	if (bits_to_set & EXTENT_DIRTY)
		tree->dirty_bytes += end - start + 1;
	state->state |= bits_to_set;
	node = tree_insert(&tree->state, end, &state->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk(KERN_ERR "btrfs found node %llu %llu on insert of "
		       "%llu %llu\n", (unsigned long long)found->start,
		       (unsigned long long)found->end,
		       (unsigned long long)start, (unsigned long long)end);
		return -EEXIST;
	}
	state->tree = tree;
	merge_state(tree, state);
	return 0;
}

static int split_cb(struct extent_io_tree *tree, struct extent_state *orig,
		    u64 split)
{
	if (tree->ops && tree->ops->split_extent_hook)
		return tree->ops->split_extent_hook(tree->mapping->host,
						    orig, split);
	return 0;
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created second half.  'split' indicates an
 * offset inside 'orig' where it should be split.
 *
 * Before calling,
 * the tree has 'orig' at [orig->start, orig->end].  After calling, there
 * are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig: [split, orig->end]
 *
 * The tree locks are not taken by this function. They need to be held
 * by the caller.
 */
static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
		       struct extent_state *prealloc, u64 split)
{
	struct rb_node *node;

	split_cb(tree, orig, split);

	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->state = orig->state;
	orig->start = split;

	node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
	if (node) {
		free_extent_state(prealloc);
		return -EEXIST;
	}
	prealloc->tree = tree;
	return 0;
}

/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up anyone waiting on this state (wake == 1), or
 * forcibly remove the state from the tree (delete == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree
 */
static int clear_state_bit(struct extent_io_tree *tree,
			   struct extent_state *state,
			   int *bits, int wake)
{
	int bits_to_clear = *bits & ~EXTENT_CTLBITS;
	int ret = state->state & bits_to_clear;

	if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		WARN_ON(range > tree->dirty_bytes);
		tree->dirty_bytes -= range;
	}
	clear_state_cb(tree, state, bits);
	state->state &= ~bits_to_clear;
	if (wake)
		wake_up(&state->wq);
	if (state->state == 0) {
		if (state->tree) {
			rb_erase(&state->rb_node, &tree->state);
			state->tree = NULL;
			free_extent_state(state);
		} else {
			WARN_ON(1);
		}
	} else {
		merge_state(tree, state);
	}
	return ret;
}

static struct extent_state *
alloc_extent_state_atomic(struct extent_state *prealloc)
{
	if (!prealloc)
		prealloc = alloc_extent_state(GFP_ATOMIC);

	return prealloc;
}

/*
 * clear some bits on a range in the tree.  This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (ie for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
 * bits were already set, or zero if none of the bits were already set.
 */
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		     int bits, int wake, int delete,
		     struct extent_state **cached_state,
		     gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *cached;
	struct extent_state *prealloc = NULL;
	struct rb_node *next_node;
	struct rb_node *node;
	u64 last_end;
	int err;
	int set = 0;
	int clear = 0;

	if (delete)
		bits |= ~EXTENT_CTLBITS;
	bits |= EXTENT_FIRST_DELALLOC;

	if (bits & (EXTENT_IOBITS | EXTENT_BOUNDARY))
		clear = 1;
again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	spin_lock(&tree->lock);
	if (cached_state) {
		cached = *cached_state;

		if (clear) {
			*cached_state = NULL;
			cached_state = NULL;
		}

		if (cached && cached->tree && cached->start <= start &&
		    cached->end > start) {
			if (clear)
				atomic_dec(&cached->refs);
			state = cached;
			goto hit_next;
		}
		if (clear)
			free_extent_state(cached);
	}
	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	if (state->start > end)
		goto out;
	WARN_ON(state->end < start);
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 *  | state | or
	 *  | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip
	 * bits on second half.
	 *
	 * If the extent we found extends past our range, we
	 * just split and search again.  It'll get split again
	 * the next time though.
	 *
	 * If the extent we found is inside our range, we clear
	 * the desired bit on it.
	 */

	if (state->start < start) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set |= clear_state_bit(tree, state, &bits, wake);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and clear the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);
		if (wake)
			wake_up(&state->wq);

		set |= clear_state_bit(tree, prealloc, &bits, wake);

		prealloc = NULL;
		goto out;
	}

	if (state->end < end && prealloc && !need_resched())
		next_node = rb_next(&state->rb_node);
	else
		next_node = NULL;

	set |= clear_state_bit(tree, state, &bits, wake);
	if (last_end == (u64)-1)
		goto out;
	start = last_end + 1;
	if (start <= end && next_node) {
		state = rb_entry(next_node, struct extent_state,
				 rb_node);
		if (state->start == start)
			goto hit_next;
	}
	goto search_again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return set;

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
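
/*
 * Illustrative sketch (not from this file): a truncate-style caller
 * might drop everything it knows about a range, waking sleepers and
 * removing the states outright:
 *
 *	clear_extent_bit(tree, start, end, EXTENT_DIRTY | EXTENT_DELALLOC,
 *			 1, 1, NULL, GFP_NOFS);
 *
 * The wrappers further down pick the bit combinations for the common
 * cases.
 */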

static int wait_on_state(struct extent_io_tree *tree,
			 struct extent_state *state)
		__releases(tree->lock)
		__acquires(tree->lock)
{
	DEFINE_WAIT(wait);
	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
	spin_unlock(&tree->lock);
	schedule();
	spin_lock(&tree->lock);
	finish_wait(&state->wq, &wait);
	return 0;
}

/*
 * waits for one or more bits to clear on a range in the state tree.
 * The range [start, end] is inclusive.
 * The tree lock is taken by this function
 */
int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
{
	struct extent_state *state;
	struct rb_node *node;

	spin_lock(&tree->lock);
again:
	while (1) {
		/*
		 * this search will find all the extents that end after
		 * our range starts
		 */
		node = tree_search(tree, start);
		if (!node)
			break;

		state = rb_entry(node, struct extent_state, rb_node);

		if (state->start > end)
			goto out;

		if (state->state & bits) {
			start = state->start;
			atomic_inc(&state->refs);
			wait_on_state(tree, state);
			free_extent_state(state);
			goto again;
		}
		start = state->end + 1;

		if (start > end)
			break;

		if (need_resched()) {
			spin_unlock(&tree->lock);
			cond_resched();
			spin_lock(&tree->lock);
		}
	}
out:
	spin_unlock(&tree->lock);
	return 0;
}

static int set_state_bits(struct extent_io_tree *tree,
			  struct extent_state *state,
			  int *bits)
{
	int ret;
	int bits_to_set = *bits & ~EXTENT_CTLBITS;

	ret = set_state_cb(tree, state, bits);
	if (ret)
		return ret;
	if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		tree->dirty_bytes += range;
	}
	state->state |= bits_to_set;

	return 0;
}
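
/*
 * cached_state is an optimization: a caller that repeatedly works on
 * the same range can keep a reference to the extent_state it hit last
 * time and hand it back in, letting set/clear_extent_bit skip the
 * rbtree search when the cached state still covers the start of the
 * range.
 */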
static void cache_state(struct extent_state *state,
			struct extent_state **cached_ptr)
{
	if (cached_ptr && !(*cached_ptr)) {
		if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY)) {
			*cached_ptr = state;
			atomic_inc(&state->refs);
		}
	}
}

static void uncache_state(struct extent_state **cached_ptr)
{
	if (cached_ptr && (*cached_ptr)) {
		struct extent_state *state = *cached_ptr;
		*cached_ptr = NULL;
		free_extent_state(state);
	}
}

/*
 * set some bits on a range in the tree.  This may require allocations or
 * sleeping, so the gfp mask is used to indicate what is allowed.
 *
 * If any of the exclusive bits are set, this will fail with -EEXIST if some
 * part of the range already has the desired bits set.  The start of the
 * existing range is returned in failed_start in this case.
 *
 * [start, end] is inclusive.  This takes the tree lock.
 */
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   int bits, int exclusive_bits, u64 *failed_start,
		   struct extent_state **cached_state, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	int err = 0;
	u64 last_start;
	u64 last_end;

	bits |= EXTENT_FIRST_DELALLOC;
again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		BUG_ON(!prealloc);
	}

	spin_lock(&tree->lock);
	if (cached_state && *cached_state) {
		state = *cached_state;
		if (state->start <= start && state->end > start &&
		    state->tree) {
			node = &state->rb_node;
			goto hit_next;
		}
	}
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = insert_state(tree, prealloc, start, end, &bits);
		prealloc = NULL;
		BUG_ON(err == -EEXIST);
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		struct rb_node *next_node;
		if (state->state & exclusive_bits) {
			*failed_start = state->start;
			err = -EEXIST;
			goto out;
		}

		err = set_state_bits(tree, state, &bits);
		if (err)
			goto out;

		cache_state(state, cached_state);
		merge_state(tree, state);
		if (last_end == (u64)-1)
			goto out;

		start = last_end + 1;
		next_node = rb_next(&state->rb_node);
		if (next_node && start < end && prealloc && !need_resched()) {
			state = rb_entry(next_node, struct extent_state,
					 rb_node);
			if (state->start == start)
				goto hit_next;
		}
		goto search_again;
	}

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		if (state->state & exclusive_bits) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			err = set_state_bits(tree, state, &bits);
			if (err)
				goto out;
			cache_state(state, cached_state);
			merge_state(tree, state);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or               | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);

		/*
		 * Avoid to free 'prealloc' if it can be merged with
		 * the later extent.
		 */
		err = insert_state(tree, prealloc, start, this_end,
				   &bits);
		BUG_ON(err == -EEXIST);
		if (err) {
			free_extent_state(prealloc);
			prealloc = NULL;
			goto out;
		}
		cache_state(prealloc, cached_state);
		prealloc = NULL;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *              | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		if (state->state & exclusive_bits) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		err = set_state_bits(tree, prealloc, &bits);
		if (err) {
			prealloc = NULL;
			goto out;
		}
		cache_state(prealloc, cached_state);
		merge_state(tree, prealloc);
		prealloc = NULL;
		goto out;
	}

	goto search_again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return err;

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
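
/*
 * Illustrative sketch (not from this file): marking a range as delalloc
 * while caching the state across the lock/unlock pair:
 *
 *	struct extent_state *cached = NULL;
 *
 *	lock_extent_bits(tree, start, end, 0, &cached, GFP_NOFS);
 *	set_extent_delalloc(tree, start, end, &cached, GFP_NOFS);
 *	unlock_extent_cached(tree, start, end, &cached, GFP_NOFS);
 */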

/* wrappers around set/clear extent bit */
int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
			      NULL, mask);
}

int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		    int bits, gfp_t mask)
{
	return set_extent_bit(tree, start, end, bits, 0, NULL,
			      NULL, mask);
}

int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		      int bits, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask);
}

int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
			struct extent_state **cached_state, gfp_t mask)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_DIRTY | EXTENT_UPTODATE,
			      0, NULL, cached_state, mask);
}

int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
		       gfp_t mask)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC |
				EXTENT_DO_ACCOUNTING, 0, 0, NULL, mask);
}

int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
		   gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
			      NULL, mask);
}

int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
			struct extent_state **cached_state, gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0,
			      NULL, cached_state, mask);
}

static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
				 u64 end, struct extent_state **cached_state,
				 gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
				cached_state, mask);
}

/*
 * either insert or lock state struct between start and end.  Use mask to
 * tell us if waiting is desired.
 */
int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		     int bits, struct extent_state **cached_state, gfp_t mask)
{
	int err;
	u64 failed_start;
	while (1) {
		err = set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
				     EXTENT_LOCKED, &failed_start,
				     cached_state, mask);
		if (err == -EEXIST && (mask & __GFP_WAIT)) {
			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
			start = failed_start;
		} else {
			break;
		}
		WARN_ON(start > end);
	}
	return err;
}

int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
{
	return lock_extent_bits(tree, start, end, 0, NULL, mask);
}
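
/*
 * Illustrative sketch (not from this file): the usual pattern brackets
 * an I/O operation with a lock/unlock pair on the byte range:
 *
 *	lock_extent(tree, start, end, GFP_NOFS);
 *	... do the read or write ...
 *	unlock_extent(tree, start, end, GFP_NOFS);
 *
 * A second locker of an overlapping range blocks in wait_extent_bit()
 * until EXTENT_LOCKED is cleared on the part it collided with.
 */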

int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
		    gfp_t mask)
{
	int err;
	u64 failed_start;

	err = set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
			     &failed_start, NULL, mask);
	if (err == -EEXIST) {
		if (failed_start > start)
			clear_extent_bit(tree, start, failed_start - 1,
					 EXTENT_LOCKED, 1, 0, NULL, mask);
		return 0;
	}
	return 1;
}

int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
			 struct extent_state **cached, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
				mask);
}

int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
				mask);
}

/*
 * helper function to set both pages and extents in the tree writeback
 */
static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		set_page_writeback(page);
		page_cache_release(page);
		index++;
	}
	return 0;
}

/*
 * find the first offset in the io tree with 'bits' set. zero is
 * returned if we find something, and *start_ret and *end_ret are
 * set to reflect the state struct that was found.
 *
 * If nothing was found, 1 is returned, < 0 on error
 */
int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, int bits)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 1;

	spin_lock(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->end >= start && (state->state & bits)) {
			*start_ret = state->start;
			*end_ret = state->end;
			ret = 0;
			break;
		}
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	spin_unlock(&tree->lock);
	return ret;
}
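
/*
 * Illustrative sketch (not from this file): walking every dirty range
 * in a tree with find_first_extent_bit, which returns 0 on a hit:
 *
 *	u64 found_start, found_end, cur = 0;
 *
 *	while (!find_first_extent_bit(tree, cur, &found_start,
 *				      &found_end, EXTENT_DIRTY)) {
 *		... process [found_start, found_end] ...
 *		cur = found_end + 1;
 *	}
 */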
Chris Masond1310b22008-01-24 16:13:08 -05001101
Chris Masond352ac62008-09-29 15:18:18 -04001102/* find the first state struct with 'bits' set after 'start', and
1103 * return it. tree->lock must be held. NULL will returned if
1104 * nothing was found after 'start'
1105 */
Chris Masond7fc6402008-02-18 12:12:38 -05001106struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
1107 u64 start, int bits)
1108{
1109 struct rb_node *node;
1110 struct extent_state *state;
1111
1112 /*
1113 * this search will find all the extents that end after
1114 * our range starts.
1115 */
1116 node = tree_search(tree, start);
Chris Masond3977122009-01-05 21:25:51 -05001117 if (!node)
Chris Masond7fc6402008-02-18 12:12:38 -05001118 goto out;
Chris Masond7fc6402008-02-18 12:12:38 -05001119
Chris Masond3977122009-01-05 21:25:51 -05001120 while (1) {
Chris Masond7fc6402008-02-18 12:12:38 -05001121 state = rb_entry(node, struct extent_state, rb_node);
Chris Masond3977122009-01-05 21:25:51 -05001122 if (state->end >= start && (state->state & bits))
Chris Masond7fc6402008-02-18 12:12:38 -05001123 return state;
Chris Masond3977122009-01-05 21:25:51 -05001124
Chris Masond7fc6402008-02-18 12:12:38 -05001125 node = rb_next(node);
1126 if (!node)
1127 break;
1128 }
1129out:
1130 return NULL;
1131}
Chris Masond7fc6402008-02-18 12:12:38 -05001132
Chris Masond352ac62008-09-29 15:18:18 -04001133/*
1134 * find a contiguous range of bytes in the file marked as delalloc, not
1135 * more than 'max_bytes'. start and end are used to return the range,
1136 *
1137 * 1 is returned if we find something, 0 if nothing was in the tree
1138 */
Chris Masonc8b97812008-10-29 14:49:59 -04001139static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
Josef Bacikc2a128d2010-02-02 21:19:11 +00001140 u64 *start, u64 *end, u64 max_bytes,
1141 struct extent_state **cached_state)
Chris Masond1310b22008-01-24 16:13:08 -05001142{
1143 struct rb_node *node;
1144 struct extent_state *state;
1145 u64 cur_start = *start;
1146 u64 found = 0;
1147 u64 total_bytes = 0;
1148
Chris Masoncad321a2008-12-17 14:51:42 -05001149 spin_lock(&tree->lock);
Chris Masonc8b97812008-10-29 14:49:59 -04001150
Chris Masond1310b22008-01-24 16:13:08 -05001151 /*
1152 * this search will find all the extents that end after
1153 * our range starts.
1154 */
Chris Mason80ea96b2008-02-01 14:51:59 -05001155 node = tree_search(tree, cur_start);
Peter2b114d12008-04-01 11:21:40 -04001156 if (!node) {
Chris Mason3b951512008-04-17 11:29:12 -04001157 if (!found)
1158 *end = (u64)-1;
Chris Masond1310b22008-01-24 16:13:08 -05001159 goto out;
1160 }
1161
Chris Masond3977122009-01-05 21:25:51 -05001162 while (1) {
Chris Masond1310b22008-01-24 16:13:08 -05001163 state = rb_entry(node, struct extent_state, rb_node);
Zheng Yan5b21f2e2008-09-26 10:05:38 -04001164 if (found && (state->start != cur_start ||
1165 (state->state & EXTENT_BOUNDARY))) {
Chris Masond1310b22008-01-24 16:13:08 -05001166 goto out;
1167 }
1168 if (!(state->state & EXTENT_DELALLOC)) {
1169 if (!found)
1170 *end = state->end;
1171 goto out;
1172 }
Josef Bacikc2a128d2010-02-02 21:19:11 +00001173 if (!found) {
Chris Masond1310b22008-01-24 16:13:08 -05001174 *start = state->start;
Josef Bacikc2a128d2010-02-02 21:19:11 +00001175 *cached_state = state;
1176 atomic_inc(&state->refs);
1177 }
Chris Masond1310b22008-01-24 16:13:08 -05001178 found++;
1179 *end = state->end;
1180 cur_start = state->end + 1;
1181 node = rb_next(node);
1182 if (!node)
1183 break;
1184 total_bytes += state->end - state->start + 1;
1185 if (total_bytes >= max_bytes)
1186 break;
1187 }
1188out:
Chris Masoncad321a2008-12-17 14:51:42 -05001189 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001190 return found;
1191}
1192
Chris Masonc8b97812008-10-29 14:49:59 -04001193static noinline int __unlock_for_delalloc(struct inode *inode,
1194 struct page *locked_page,
1195 u64 start, u64 end)
1196{
1197 int ret;
1198 struct page *pages[16];
1199 unsigned long index = start >> PAGE_CACHE_SHIFT;
1200 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1201 unsigned long nr_pages = end_index - index + 1;
1202 int i;
1203
1204 if (index == locked_page->index && end_index == index)
1205 return 0;
1206
Chris Masond3977122009-01-05 21:25:51 -05001207 while (nr_pages > 0) {
Chris Masonc8b97812008-10-29 14:49:59 -04001208 ret = find_get_pages_contig(inode->i_mapping, index,
Chris Mason5b050f02008-11-11 09:34:41 -05001209 min_t(unsigned long, nr_pages,
1210 ARRAY_SIZE(pages)), pages);
Chris Masonc8b97812008-10-29 14:49:59 -04001211 for (i = 0; i < ret; i++) {
1212 if (pages[i] != locked_page)
1213 unlock_page(pages[i]);
1214 page_cache_release(pages[i]);
1215 }
1216 nr_pages -= ret;
1217 index += ret;
1218 cond_resched();
1219 }
1220 return 0;
1221}
1222
1223static noinline int lock_delalloc_pages(struct inode *inode,
1224 struct page *locked_page,
1225 u64 delalloc_start,
1226 u64 delalloc_end)
1227{
1228 unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT;
1229 unsigned long start_index = index;
1230 unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT;
1231 unsigned long pages_locked = 0;
1232 struct page *pages[16];
1233 unsigned long nrpages;
1234 int ret;
1235 int i;
1236
1237 /* the caller is responsible for locking the start index */
1238 if (index == locked_page->index && index == end_index)
1239 return 0;
1240
1241 /* skip the page at the start index */
1242 nrpages = end_index - index + 1;
Chris Masond3977122009-01-05 21:25:51 -05001243 while (nrpages > 0) {
Chris Masonc8b97812008-10-29 14:49:59 -04001244 ret = find_get_pages_contig(inode->i_mapping, index,
Chris Mason5b050f02008-11-11 09:34:41 -05001245 min_t(unsigned long,
1246 nrpages, ARRAY_SIZE(pages)), pages);
Chris Masonc8b97812008-10-29 14:49:59 -04001247 if (ret == 0) {
1248 ret = -EAGAIN;
1249 goto done;
1250 }
1251 /* now we have an array of pages, lock them all */
1252 for (i = 0; i < ret; i++) {
1253 /*
1254 * the caller is taking responsibility for
1255 * locked_page
1256 */
Chris Mason771ed682008-11-06 22:02:51 -05001257 if (pages[i] != locked_page) {
Chris Masonc8b97812008-10-29 14:49:59 -04001258 lock_page(pages[i]);
Chris Masonf2b1c412008-11-10 07:31:30 -05001259 if (!PageDirty(pages[i]) ||
1260 pages[i]->mapping != inode->i_mapping) {
Chris Mason771ed682008-11-06 22:02:51 -05001261 ret = -EAGAIN;
1262 unlock_page(pages[i]);
1263 page_cache_release(pages[i]);
1264 goto done;
1265 }
1266 }
Chris Masonc8b97812008-10-29 14:49:59 -04001267 page_cache_release(pages[i]);
Chris Mason771ed682008-11-06 22:02:51 -05001268 pages_locked++;
Chris Masonc8b97812008-10-29 14:49:59 -04001269 }
Chris Masonc8b97812008-10-29 14:49:59 -04001270 nrpages -= ret;
1271 index += ret;
1272 cond_resched();
1273 }
1274 ret = 0;
1275done:
1276 if (ret && pages_locked) {
1277 __unlock_for_delalloc(inode, locked_page,
1278 delalloc_start,
1279 ((u64)(start_index + pages_locked - 1)) <<
1280 PAGE_CACHE_SHIFT);
1281 }
1282 return ret;
1283}
1284
1285/*
1286 * find a contiguous range of bytes in the file marked as delalloc, not
1287 * more than 'max_bytes'. start and end are used to return the range,
1288 *
1289 * 1 is returned if we find something, 0 if nothing was in the tree
1290 */
1291static noinline u64 find_lock_delalloc_range(struct inode *inode,
1292 struct extent_io_tree *tree,
1293 struct page *locked_page,
1294 u64 *start, u64 *end,
1295 u64 max_bytes)
1296{
1297 u64 delalloc_start;
1298 u64 delalloc_end;
1299 u64 found;
Chris Mason9655d292009-09-02 15:22:30 -04001300 struct extent_state *cached_state = NULL;
Chris Masonc8b97812008-10-29 14:49:59 -04001301 int ret;
1302 int loops = 0;
1303
1304again:
1305 /* step one, find a bunch of delalloc bytes starting at start */
1306 delalloc_start = *start;
1307 delalloc_end = 0;
1308 found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
Josef Bacikc2a128d2010-02-02 21:19:11 +00001309 max_bytes, &cached_state);
Chris Mason70b99e62008-10-31 12:46:39 -04001310 if (!found || delalloc_end <= *start) {
Chris Masonc8b97812008-10-29 14:49:59 -04001311 *start = delalloc_start;
1312 *end = delalloc_end;
Josef Bacikc2a128d2010-02-02 21:19:11 +00001313 free_extent_state(cached_state);
Chris Masonc8b97812008-10-29 14:49:59 -04001314 return found;
1315 }
1316
1317 /*
Chris Mason70b99e62008-10-31 12:46:39 -04001318 * start comes from the offset of locked_page. We have to lock
1319 * pages in order, so we can't process delalloc bytes before
1320 * locked_page
1321 */
Chris Masond3977122009-01-05 21:25:51 -05001322 if (delalloc_start < *start)
Chris Mason70b99e62008-10-31 12:46:39 -04001323 delalloc_start = *start;
Chris Mason70b99e62008-10-31 12:46:39 -04001324
1325 /*
Chris Masonc8b97812008-10-29 14:49:59 -04001326 * make sure to limit the number of pages we try to lock down
1327 * if we're looping.
1328 */
Chris Masond3977122009-01-05 21:25:51 -05001329 if (delalloc_end + 1 - delalloc_start > max_bytes && loops)
Chris Mason771ed682008-11-06 22:02:51 -05001330 delalloc_end = delalloc_start + PAGE_CACHE_SIZE - 1;
Chris Masond3977122009-01-05 21:25:51 -05001331
Chris Masonc8b97812008-10-29 14:49:59 -04001332 /* step two, lock all the pages after the page that has start */
1333 ret = lock_delalloc_pages(inode, locked_page,
1334 delalloc_start, delalloc_end);
1335 if (ret == -EAGAIN) {
1336 /* some of the pages are gone, lets avoid looping by
1337 * shortening the size of the delalloc range we're searching
1338 */
Chris Mason9655d292009-09-02 15:22:30 -04001339 free_extent_state(cached_state);
Chris Masonc8b97812008-10-29 14:49:59 -04001340 if (!loops) {
1341 unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1);
1342 max_bytes = PAGE_CACHE_SIZE - offset;
1343 loops = 1;
1344 goto again;
1345 } else {
1346 found = 0;
1347 goto out_failed;
1348 }
1349 }
1350 BUG_ON(ret);
1351
1352 /* step three, lock the state bits for the whole range */
Chris Mason9655d292009-09-02 15:22:30 -04001353 lock_extent_bits(tree, delalloc_start, delalloc_end,
1354 0, &cached_state, GFP_NOFS);
Chris Masonc8b97812008-10-29 14:49:59 -04001355
1356 /* then test to make sure it is all still delalloc */
1357 ret = test_range_bit(tree, delalloc_start, delalloc_end,
Chris Mason9655d292009-09-02 15:22:30 -04001358 EXTENT_DELALLOC, 1, cached_state);
Chris Masonc8b97812008-10-29 14:49:59 -04001359 if (!ret) {
Chris Mason9655d292009-09-02 15:22:30 -04001360 unlock_extent_cached(tree, delalloc_start, delalloc_end,
1361 &cached_state, GFP_NOFS);
Chris Masonc8b97812008-10-29 14:49:59 -04001362 __unlock_for_delalloc(inode, locked_page,
1363 delalloc_start, delalloc_end);
1364 cond_resched();
1365 goto again;
1366 }
Chris Mason9655d292009-09-02 15:22:30 -04001367 free_extent_state(cached_state);
Chris Masonc8b97812008-10-29 14:49:59 -04001368 *start = delalloc_start;
1369 *end = delalloc_end;
1370out_failed:
1371 return found;
1372}
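
/*
 * Hedged usage sketch (not btrfs code): how a writepage-style caller can
 * walk every delalloc range touching its locked page, mirroring the loop
 * in __extent_writepage further down. All arguments are assumed to come
 * from the caller's context; the function name is made up.
 */
static void example_walk_delalloc(struct inode *inode,
				  struct extent_io_tree *tree,
				  struct page *locked_page,
				  u64 page_start, u64 page_end)
{
	u64 dstart = page_start;
	u64 dend = 0;
	u64 found;

	while (dend < page_end) {
		found = find_lock_delalloc_range(inode, tree, locked_page,
						 &dstart, &dend,
						 128 * 1024 * 1024);
		if (found == 0) {
			/* no delalloc here, probe the next range */
			dstart = dend + 1;
			continue;
		}
		/* ... start IO on [dstart, dend], then advance ... */
		dstart = dend + 1;
	}
}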
1373
1374int extent_clear_unlock_delalloc(struct inode *inode,
1375 struct extent_io_tree *tree,
1376 u64 start, u64 end, struct page *locked_page,
Chris Masona791e352009-10-08 11:27:10 -04001377 unsigned long op)
Chris Masonc8b97812008-10-29 14:49:59 -04001378{
1379 int ret;
1380 struct page *pages[16];
1381 unsigned long index = start >> PAGE_CACHE_SHIFT;
1382 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1383 unsigned long nr_pages = end_index - index + 1;
1384 int i;
Chris Mason771ed682008-11-06 22:02:51 -05001385 int clear_bits = 0;
Chris Masonc8b97812008-10-29 14:49:59 -04001386
Chris Masona791e352009-10-08 11:27:10 -04001387 if (op & EXTENT_CLEAR_UNLOCK)
Chris Mason771ed682008-11-06 22:02:51 -05001388 clear_bits |= EXTENT_LOCKED;
Chris Masona791e352009-10-08 11:27:10 -04001389 if (op & EXTENT_CLEAR_DIRTY)
Chris Masonc8b97812008-10-29 14:49:59 -04001390 clear_bits |= EXTENT_DIRTY;
1391
Chris Masona791e352009-10-08 11:27:10 -04001392 if (op & EXTENT_CLEAR_DELALLOC)
Chris Mason771ed682008-11-06 22:02:51 -05001393 clear_bits |= EXTENT_DELALLOC;
1394
Chris Mason2c64c532009-09-02 15:04:12 -04001395 clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS);
Josef Bacik32c00af2009-10-08 13:34:05 -04001396 if (!(op & (EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
1397 EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK |
1398 EXTENT_SET_PRIVATE2)))
Chris Mason771ed682008-11-06 22:02:51 -05001399 return 0;
Chris Masonc8b97812008-10-29 14:49:59 -04001400
Chris Masond3977122009-01-05 21:25:51 -05001401 while (nr_pages > 0) {
Chris Masonc8b97812008-10-29 14:49:59 -04001402 ret = find_get_pages_contig(inode->i_mapping, index,
Chris Mason5b050f02008-11-11 09:34:41 -05001403 min_t(unsigned long,
1404 nr_pages, ARRAY_SIZE(pages)), pages);
Chris Masonc8b97812008-10-29 14:49:59 -04001405 for (i = 0; i < ret; i++) {
Chris Mason8b62b722009-09-02 16:53:46 -04001406
Chris Masona791e352009-10-08 11:27:10 -04001407 if (op & EXTENT_SET_PRIVATE2)
Chris Mason8b62b722009-09-02 16:53:46 -04001408 SetPagePrivate2(pages[i]);
1409
Chris Masonc8b97812008-10-29 14:49:59 -04001410 if (pages[i] == locked_page) {
1411 page_cache_release(pages[i]);
1412 continue;
1413 }
Chris Masona791e352009-10-08 11:27:10 -04001414 if (op & EXTENT_CLEAR_DIRTY)
Chris Masonc8b97812008-10-29 14:49:59 -04001415 clear_page_dirty_for_io(pages[i]);
Chris Masona791e352009-10-08 11:27:10 -04001416 if (op & EXTENT_SET_WRITEBACK)
Chris Masonc8b97812008-10-29 14:49:59 -04001417 set_page_writeback(pages[i]);
Chris Masona791e352009-10-08 11:27:10 -04001418 if (op & EXTENT_END_WRITEBACK)
Chris Masonc8b97812008-10-29 14:49:59 -04001419 end_page_writeback(pages[i]);
Chris Masona791e352009-10-08 11:27:10 -04001420 if (op & EXTENT_CLEAR_UNLOCK_PAGE)
Chris Mason771ed682008-11-06 22:02:51 -05001421 unlock_page(pages[i]);
Chris Masonc8b97812008-10-29 14:49:59 -04001422 page_cache_release(pages[i]);
1423 }
1424 nr_pages -= ret;
1425 index += ret;
1426 cond_resched();
1427 }
1428 return 0;
1429}
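
/*
 * Hedged example (illustrative only, not a btrfs helper): a cleanup path
 * that gives a whole range back after a failure would typically combine
 * most of the op bits tested above in a single call.
 */
static inline void example_fail_delalloc_range(struct inode *inode,
					       struct extent_io_tree *tree,
					       u64 start, u64 end,
					       struct page *locked_page)
{
	extent_clear_unlock_delalloc(inode, tree, start, end, locked_page,
				     EXTENT_CLEAR_UNLOCK_PAGE |
				     EXTENT_CLEAR_UNLOCK |
				     EXTENT_CLEAR_DELALLOC |
				     EXTENT_CLEAR_DIRTY |
				     EXTENT_SET_WRITEBACK |
				     EXTENT_END_WRITEBACK);
}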
Chris Masonc8b97812008-10-29 14:49:59 -04001430
Chris Masond352ac62008-09-29 15:18:18 -04001431/*
 1432 * count the number of bytes in the tree that have the given bit(s)
 1433 * set. This can be fairly slow, except for EXTENT_DIRTY which is
 1434 * cached. The total number of bytes found is returned.
1435 */
Chris Masond1310b22008-01-24 16:13:08 -05001436u64 count_range_bits(struct extent_io_tree *tree,
1437 u64 *start, u64 search_end, u64 max_bytes,
Chris Masonec29ed52011-02-23 16:23:20 -05001438 unsigned long bits, int contig)
Chris Masond1310b22008-01-24 16:13:08 -05001439{
1440 struct rb_node *node;
1441 struct extent_state *state;
1442 u64 cur_start = *start;
1443 u64 total_bytes = 0;
Chris Masonec29ed52011-02-23 16:23:20 -05001444 u64 last = 0;
Chris Masond1310b22008-01-24 16:13:08 -05001445 int found = 0;
1446
1447 if (search_end <= cur_start) {
Chris Masond1310b22008-01-24 16:13:08 -05001448 WARN_ON(1);
1449 return 0;
1450 }
1451
Chris Masoncad321a2008-12-17 14:51:42 -05001452 spin_lock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001453 if (cur_start == 0 && bits == EXTENT_DIRTY) {
1454 total_bytes = tree->dirty_bytes;
1455 goto out;
1456 }
1457 /*
1458 * this search will find all the extents that end after
1459 * our range starts.
1460 */
Chris Mason80ea96b2008-02-01 14:51:59 -05001461 node = tree_search(tree, cur_start);
Chris Masond3977122009-01-05 21:25:51 -05001462 if (!node)
Chris Masond1310b22008-01-24 16:13:08 -05001463 goto out;
Chris Masond1310b22008-01-24 16:13:08 -05001464
Chris Masond3977122009-01-05 21:25:51 -05001465 while (1) {
Chris Masond1310b22008-01-24 16:13:08 -05001466 state = rb_entry(node, struct extent_state, rb_node);
1467 if (state->start > search_end)
1468 break;
Chris Masonec29ed52011-02-23 16:23:20 -05001469 if (contig && found && state->start > last + 1)
1470 break;
1471 if (state->end >= cur_start && (state->state & bits) == bits) {
Chris Masond1310b22008-01-24 16:13:08 -05001472 total_bytes += min(search_end, state->end) + 1 -
1473 max(cur_start, state->start);
1474 if (total_bytes >= max_bytes)
1475 break;
1476 if (!found) {
Josef Bacikaf60bed2011-05-04 11:11:17 -04001477 *start = max(cur_start, state->start);
Chris Masond1310b22008-01-24 16:13:08 -05001478 found = 1;
1479 }
Chris Masonec29ed52011-02-23 16:23:20 -05001480 last = state->end;
1481 } else if (contig && found) {
1482 break;
Chris Masond1310b22008-01-24 16:13:08 -05001483 }
1484 node = rb_next(node);
1485 if (!node)
1486 break;
1487 }
1488out:
Chris Masoncad321a2008-12-17 14:51:42 -05001489 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001490 return total_bytes;
1491}
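
/*
 * Hedged example (not a btrfs helper): summing the dirty bytes in a file
 * range. With contig == 0 every extent carrying the bits is counted;
 * contig == 1 would instead stop at the first gap after the first hit.
 */
static inline u64 example_count_dirty_bytes(struct extent_io_tree *tree,
					    u64 from, u64 to)
{
	u64 start = from;

	return count_range_bits(tree, &start, to, (u64)-1,
				EXTENT_DIRTY, 0);
}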
Christoph Hellwigb2950862008-12-02 09:54:17 -05001492
Chris Masond352ac62008-09-29 15:18:18 -04001493/*
 1494 * set the private field for a given byte offset in the tree. If there isn't
 1495 * an extent_state that starts exactly at 'start', -ENOENT is returned.
1496 */
Chris Masond1310b22008-01-24 16:13:08 -05001497int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
1498{
1499 struct rb_node *node;
1500 struct extent_state *state;
1501 int ret = 0;
1502
Chris Masoncad321a2008-12-17 14:51:42 -05001503 spin_lock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001504 /*
1505 * this search will find all the extents that end after
1506 * our range starts.
1507 */
Chris Mason80ea96b2008-02-01 14:51:59 -05001508 node = tree_search(tree, start);
Peter2b114d12008-04-01 11:21:40 -04001509 if (!node) {
Chris Masond1310b22008-01-24 16:13:08 -05001510 ret = -ENOENT;
1511 goto out;
1512 }
1513 state = rb_entry(node, struct extent_state, rb_node);
1514 if (state->start != start) {
1515 ret = -ENOENT;
1516 goto out;
1517 }
1518 state->private = private;
1519out:
Chris Masoncad321a2008-12-17 14:51:42 -05001520 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001521 return ret;
1522}
1523
1524int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
1525{
1526 struct rb_node *node;
1527 struct extent_state *state;
1528 int ret = 0;
1529
Chris Masoncad321a2008-12-17 14:51:42 -05001530 spin_lock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001531 /*
1532 * this search will find all the extents that end after
1533 * our range starts.
1534 */
Chris Mason80ea96b2008-02-01 14:51:59 -05001535 node = tree_search(tree, start);
Peter2b114d12008-04-01 11:21:40 -04001536 if (!node) {
Chris Masond1310b22008-01-24 16:13:08 -05001537 ret = -ENOENT;
1538 goto out;
1539 }
1540 state = rb_entry(node, struct extent_state, rb_node);
1541 if (state->start != start) {
1542 ret = -ENOENT;
1543 goto out;
1544 }
1545 *private = state->private;
1546out:
Chris Masoncad321a2008-12-17 14:51:42 -05001547 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001548 return ret;
1549}
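
/*
 * Hedged round-trip sketch for the private field: both helpers above
 * return -ENOENT unless an extent_state starts exactly at 'start'.
 * The 0xdeadbeef value and the function name are made up.
 */
static inline int example_private_roundtrip(struct extent_io_tree *tree,
					    u64 start)
{
	u64 val = 0;
	int ret;

	ret = set_state_private(tree, start, 0xdeadbeef);
	if (ret)
		return ret;
	ret = get_state_private(tree, start, &val);
	/* on success, val == 0xdeadbeef */
	return ret;
}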
1550
1551/*
1552 * searches a range in the state tree for a given mask.
Chris Mason70dec802008-01-29 09:59:12 -05001553 * If 'filled' == 1, this returns 1 only if every byte in the range
Chris Masond1310b22008-01-24 16:13:08 -05001554 * is covered by extents with the bits set. Otherwise, 1 is returned
 1555 * if any bit in the range is found set.
1556 */
1557int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
Chris Mason9655d292009-09-02 15:22:30 -04001558 int bits, int filled, struct extent_state *cached)
Chris Masond1310b22008-01-24 16:13:08 -05001559{
1560 struct extent_state *state = NULL;
1561 struct rb_node *node;
1562 int bitset = 0;
Chris Masond1310b22008-01-24 16:13:08 -05001563
Chris Masoncad321a2008-12-17 14:51:42 -05001564 spin_lock(&tree->lock);
Josef Bacikdf98b6e2011-06-20 14:53:48 -04001565 if (cached && cached->tree && cached->start <= start &&
1566 cached->end > start)
Chris Mason9655d292009-09-02 15:22:30 -04001567 node = &cached->rb_node;
1568 else
1569 node = tree_search(tree, start);
Chris Masond1310b22008-01-24 16:13:08 -05001570 while (node && start <= end) {
1571 state = rb_entry(node, struct extent_state, rb_node);
1572
1573 if (filled && state->start > start) {
1574 bitset = 0;
1575 break;
1576 }
1577
1578 if (state->start > end)
1579 break;
1580
1581 if (state->state & bits) {
1582 bitset = 1;
1583 if (!filled)
1584 break;
1585 } else if (filled) {
1586 bitset = 0;
1587 break;
1588 }
Chris Mason46562ce2009-09-23 20:23:16 -04001589
1590 if (state->end == (u64)-1)
1591 break;
1592
Chris Masond1310b22008-01-24 16:13:08 -05001593 start = state->end + 1;
1594 if (start > end)
1595 break;
1596 node = rb_next(node);
1597 if (!node) {
1598 if (filled)
1599 bitset = 0;
1600 break;
1601 }
1602 }
Chris Masoncad321a2008-12-17 14:51:42 -05001603 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001604 return bitset;
1605}
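
/*
 * Hedged contrast of the two 'filled' modes; neither wrapper below
 * exists in btrfs, they only restate the semantics documented above.
 */
static inline int example_all_uptodate(struct extent_io_tree *tree,
				       u64 start, u64 end)
{
	/* filled == 1: every byte of the range must carry the bit */
	return test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL);
}

static inline int example_any_locked(struct extent_io_tree *tree,
				     u64 start, u64 end)
{
	/* filled == 0: one matching extent anywhere is enough */
	return test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL);
}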
Chris Masond1310b22008-01-24 16:13:08 -05001606
1607/*
1608 * helper function to set a given page up to date if all the
1609 * extents in the tree for that page are up to date
1610 */
1611static int check_page_uptodate(struct extent_io_tree *tree,
1612 struct page *page)
1613{
1614 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1615 u64 end = start + PAGE_CACHE_SIZE - 1;
Chris Mason9655d292009-09-02 15:22:30 -04001616 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
Chris Masond1310b22008-01-24 16:13:08 -05001617 SetPageUptodate(page);
1618 return 0;
1619}
1620
1621/*
1622 * helper function to unlock a page if all the extents in the tree
1623 * for that page are unlocked
1624 */
1625static int check_page_locked(struct extent_io_tree *tree,
1626 struct page *page)
1627{
1628 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1629 u64 end = start + PAGE_CACHE_SIZE - 1;
Chris Mason9655d292009-09-02 15:22:30 -04001630 if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL))
Chris Masond1310b22008-01-24 16:13:08 -05001631 unlock_page(page);
1632 return 0;
1633}
1634
1635/*
 1636 * helper function to end page writeback; writeback bits are no longer
 1637 * tracked per extent in the tree, so this ends writeback unconditionally
1638 */
1639static int check_page_writeback(struct extent_io_tree *tree,
1640 struct page *page)
1641{
Chris Mason1edbb732009-09-02 13:24:36 -04001642 end_page_writeback(page);
Chris Masond1310b22008-01-24 16:13:08 -05001643 return 0;
1644}
1645
1646/* lots and lots of room for performance fixes in the end_bio funcs */
1647
1648/*
1649 * after a writepage IO is done, we need to:
1650 * clear the uptodate bits on error
1651 * clear the writeback bits in the extent tree for this IO
1652 * end_page_writeback if the page has no more pending IO
1653 *
1654 * Scheduling is not allowed, so the extent state tree is expected
1655 * to have one and only one object corresponding to this IO.
1656 */
Chris Masond1310b22008-01-24 16:13:08 -05001657static void end_bio_extent_writepage(struct bio *bio, int err)
Chris Masond1310b22008-01-24 16:13:08 -05001658{
Chris Mason1259ab72008-05-12 13:39:03 -04001659 int uptodate = err == 0;
Chris Masond1310b22008-01-24 16:13:08 -05001660 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
David Woodhouse902b22f2008-08-20 08:51:49 -04001661 struct extent_io_tree *tree;
Chris Masond1310b22008-01-24 16:13:08 -05001662 u64 start;
1663 u64 end;
1664 int whole_page;
Chris Mason1259ab72008-05-12 13:39:03 -04001665 int ret;
Chris Masond1310b22008-01-24 16:13:08 -05001666
Chris Masond1310b22008-01-24 16:13:08 -05001667 do {
1668 struct page *page = bvec->bv_page;
David Woodhouse902b22f2008-08-20 08:51:49 -04001669 tree = &BTRFS_I(page->mapping->host)->io_tree;
1670
Chris Masond1310b22008-01-24 16:13:08 -05001671 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1672 bvec->bv_offset;
1673 end = start + bvec->bv_len - 1;
1674
1675 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1676 whole_page = 1;
1677 else
1678 whole_page = 0;
1679
1680 if (--bvec >= bio->bi_io_vec)
1681 prefetchw(&bvec->bv_page->flags);
Chris Mason1259ab72008-05-12 13:39:03 -04001682 if (tree->ops && tree->ops->writepage_end_io_hook) {
1683 ret = tree->ops->writepage_end_io_hook(page, start,
David Woodhouse902b22f2008-08-20 08:51:49 -04001684 end, NULL, uptodate);
Chris Mason1259ab72008-05-12 13:39:03 -04001685 if (ret)
1686 uptodate = 0;
1687 }
1688
1689 if (!uptodate && tree->ops &&
1690 tree->ops->writepage_io_failed_hook) {
1691 ret = tree->ops->writepage_io_failed_hook(bio, page,
David Woodhouse902b22f2008-08-20 08:51:49 -04001692 start, end, NULL);
Chris Mason1259ab72008-05-12 13:39:03 -04001693 if (ret == 0) {
Chris Mason1259ab72008-05-12 13:39:03 -04001694 uptodate = (err == 0);
1695 continue;
1696 }
1697 }
1698
Chris Masond1310b22008-01-24 16:13:08 -05001699 if (!uptodate) {
Josef Bacik2ac55d42010-02-03 19:33:23 +00001700 clear_extent_uptodate(tree, start, end, NULL, GFP_NOFS);
Chris Masond1310b22008-01-24 16:13:08 -05001701 ClearPageUptodate(page);
1702 SetPageError(page);
1703 }
Chris Mason70dec802008-01-29 09:59:12 -05001704
Chris Masond1310b22008-01-24 16:13:08 -05001705 if (whole_page)
1706 end_page_writeback(page);
1707 else
1708 check_page_writeback(tree, page);
Chris Masond1310b22008-01-24 16:13:08 -05001709 } while (bvec >= bio->bi_io_vec);
Chris Mason2b1f55b2008-09-24 11:48:04 -04001710
Chris Masond1310b22008-01-24 16:13:08 -05001711 bio_put(bio);
Chris Masond1310b22008-01-24 16:13:08 -05001712}
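
/*
 * Hedged stub of a writepage_end_io_hook, the tree->ops callback invoked
 * above once per bio_vec. A nonzero return flips the IO to not-uptodate;
 * this stub is illustrative, not the real btrfs hook.
 */
static int example_writepage_end_io_hook(struct page *page, u64 start,
					 u64 end,
					 struct extent_state *state,
					 int uptodate)
{
	/* a real hook would finish ordered-extent accounting here */
	return uptodate ? 0 : -EIO;
}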
1713
1714/*
1715 * after a readpage IO is done, we need to:
1716 * clear the uptodate bits on error
1717 * set the uptodate bits if things worked
1718 * set the page up to date if all extents in the tree are uptodate
1719 * clear the lock bit in the extent tree
1720 * unlock the page if there are no other extents locked for it
1721 *
1722 * Scheduling is not allowed, so the extent state tree is expected
1723 * to have one and only one object corresponding to this IO.
1724 */
Chris Masond1310b22008-01-24 16:13:08 -05001725static void end_bio_extent_readpage(struct bio *bio, int err)
Chris Masond1310b22008-01-24 16:13:08 -05001726{
1727 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
Chris Mason4125bf72010-02-03 18:18:45 +00001728 struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
1729 struct bio_vec *bvec = bio->bi_io_vec;
David Woodhouse902b22f2008-08-20 08:51:49 -04001730 struct extent_io_tree *tree;
Chris Masond1310b22008-01-24 16:13:08 -05001731 u64 start;
1732 u64 end;
1733 int whole_page;
1734 int ret;
1735
Chris Masond20f7042008-12-08 16:58:54 -05001736 if (err)
1737 uptodate = 0;
1738
Chris Masond1310b22008-01-24 16:13:08 -05001739 do {
1740 struct page *page = bvec->bv_page;
Arne Jansen507903b2011-04-06 10:02:20 +00001741 struct extent_state *cached = NULL;
1742 struct extent_state *state;
1743
David Woodhouse902b22f2008-08-20 08:51:49 -04001744 tree = &BTRFS_I(page->mapping->host)->io_tree;
1745
Chris Masond1310b22008-01-24 16:13:08 -05001746 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1747 bvec->bv_offset;
1748 end = start + bvec->bv_len - 1;
1749
1750 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1751 whole_page = 1;
1752 else
1753 whole_page = 0;
1754
Chris Mason4125bf72010-02-03 18:18:45 +00001755 if (++bvec <= bvec_end)
Chris Masond1310b22008-01-24 16:13:08 -05001756 prefetchw(&bvec->bv_page->flags);
1757
Arne Jansen507903b2011-04-06 10:02:20 +00001758 spin_lock(&tree->lock);
Chris Mason0d399202011-04-16 06:55:39 -04001759 state = find_first_extent_bit_state(tree, start, EXTENT_LOCKED);
Chris Mason109b36a2011-04-12 13:57:39 -04001760 if (state && state->start == start) {
Arne Jansen507903b2011-04-06 10:02:20 +00001761 /*
1762 * take a reference on the state, unlock will drop
1763 * the ref
1764 */
1765 cache_state(state, &cached);
1766 }
1767 spin_unlock(&tree->lock);
1768
Chris Masond1310b22008-01-24 16:13:08 -05001769 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
Chris Mason70dec802008-01-29 09:59:12 -05001770 ret = tree->ops->readpage_end_io_hook(page, start, end,
Arne Jansen507903b2011-04-06 10:02:20 +00001771 state);
Chris Masond1310b22008-01-24 16:13:08 -05001772 if (ret)
1773 uptodate = 0;
1774 }
Chris Mason7e383262008-04-09 16:28:12 -04001775 if (!uptodate && tree->ops &&
1776 tree->ops->readpage_io_failed_hook) {
1777 ret = tree->ops->readpage_io_failed_hook(bio, page,
David Woodhouse902b22f2008-08-20 08:51:49 -04001778 start, end, NULL);
Chris Mason7e383262008-04-09 16:28:12 -04001779 if (ret == 0) {
Chris Mason3b951512008-04-17 11:29:12 -04001780 uptodate =
1781 test_bit(BIO_UPTODATE, &bio->bi_flags);
Chris Masond20f7042008-12-08 16:58:54 -05001782 if (err)
1783 uptodate = 0;
Arne Jansen507903b2011-04-06 10:02:20 +00001784 uncache_state(&cached);
Chris Mason7e383262008-04-09 16:28:12 -04001785 continue;
1786 }
1787 }
Chris Mason70dec802008-01-29 09:59:12 -05001788
Chris Mason771ed682008-11-06 22:02:51 -05001789 if (uptodate) {
Arne Jansen507903b2011-04-06 10:02:20 +00001790 set_extent_uptodate(tree, start, end, &cached,
David Woodhouse902b22f2008-08-20 08:51:49 -04001791 GFP_ATOMIC);
Chris Mason771ed682008-11-06 22:02:51 -05001792 }
Arne Jansen507903b2011-04-06 10:02:20 +00001793 unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
Chris Masond1310b22008-01-24 16:13:08 -05001794
Chris Mason70dec802008-01-29 09:59:12 -05001795 if (whole_page) {
1796 if (uptodate) {
1797 SetPageUptodate(page);
1798 } else {
1799 ClearPageUptodate(page);
1800 SetPageError(page);
1801 }
Chris Masond1310b22008-01-24 16:13:08 -05001802 unlock_page(page);
Chris Mason70dec802008-01-29 09:59:12 -05001803 } else {
1804 if (uptodate) {
1805 check_page_uptodate(tree, page);
1806 } else {
1807 ClearPageUptodate(page);
1808 SetPageError(page);
1809 }
Chris Masond1310b22008-01-24 16:13:08 -05001810 check_page_locked(tree, page);
Chris Mason70dec802008-01-29 09:59:12 -05001811 }
Chris Mason4125bf72010-02-03 18:18:45 +00001812 } while (bvec <= bvec_end);
Chris Masond1310b22008-01-24 16:13:08 -05001813
1814 bio_put(bio);
Chris Masond1310b22008-01-24 16:13:08 -05001815}
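
/*
 * Hedged stub of a readpage_end_io_hook, matching the call above: the
 * hook gets the extent_state (when one was cached) so it can verify the
 * data. A nonzero return marks the range bad. Illustrative only.
 */
static int example_readpage_end_io_hook(struct page *page, u64 start,
					u64 end, struct extent_state *state)
{
	/* a real hook would verify the checksum of [start, end] here */
	return 0;
}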
1816
Miao Xie88f794e2010-11-22 03:02:55 +00001817struct bio *
1818btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
1819 gfp_t gfp_flags)
Chris Masond1310b22008-01-24 16:13:08 -05001820{
1821 struct bio *bio;
1822
1823 bio = bio_alloc(gfp_flags, nr_vecs);
1824
1825 if (bio == NULL && (current->flags & PF_MEMALLOC)) {
1826 while (!bio && (nr_vecs /= 2))
1827 bio = bio_alloc(gfp_flags, nr_vecs);
1828 }
1829
1830 if (bio) {
Chris Masone1c4b742008-04-22 13:26:46 -04001831 bio->bi_size = 0;
Chris Masond1310b22008-01-24 16:13:08 -05001832 bio->bi_bdev = bdev;
1833 bio->bi_sector = first_sector;
1834 }
1835 return bio;
1836}
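
/*
 * Hedged example (not a btrfs helper) of the fallback above: under
 * PF_MEMALLOC pressure the vector count is halved until an allocation
 * succeeds, so a caller can ask big and take what it gets. The flags
 * mirror the GFP_NOFS | __GFP_HIGH use in submit_extent_page below.
 */
static inline struct bio *example_alloc_big_bio(struct block_device *bdev,
						u64 first_sector)
{
	return btrfs_bio_alloc(bdev, first_sector, BIO_MAX_PAGES,
			       GFP_NOFS | __GFP_HIGH);
}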
1837
Chris Masonc8b97812008-10-29 14:49:59 -04001838static int submit_one_bio(int rw, struct bio *bio, int mirror_num,
1839 unsigned long bio_flags)
Chris Masond1310b22008-01-24 16:13:08 -05001840{
Chris Masond1310b22008-01-24 16:13:08 -05001841 int ret = 0;
Chris Mason70dec802008-01-29 09:59:12 -05001842 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1843 struct page *page = bvec->bv_page;
1844 struct extent_io_tree *tree = bio->bi_private;
Chris Mason70dec802008-01-29 09:59:12 -05001845 u64 start;
Chris Mason70dec802008-01-29 09:59:12 -05001846
1847 start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
Chris Mason70dec802008-01-29 09:59:12 -05001848
David Woodhouse902b22f2008-08-20 08:51:49 -04001849 bio->bi_private = NULL;
Chris Masond1310b22008-01-24 16:13:08 -05001850
1851 bio_get(bio);
1852
Chris Mason065631f2008-02-20 12:07:25 -05001853 if (tree->ops && tree->ops->submit_bio_hook)
liubo6b82ce82011-01-26 06:21:39 +00001854 ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
Chris Masoneaf25d92010-05-25 09:48:28 -04001855 mirror_num, bio_flags, start);
Chris Mason0b86a832008-03-24 15:01:56 -04001856 else
1857 submit_bio(rw, bio);
Chris Masond1310b22008-01-24 16:13:08 -05001858 if (bio_flagged(bio, BIO_EOPNOTSUPP))
1859 ret = -EOPNOTSUPP;
1860 bio_put(bio);
1861 return ret;
1862}
1863
1864static int submit_extent_page(int rw, struct extent_io_tree *tree,
1865 struct page *page, sector_t sector,
1866 size_t size, unsigned long offset,
1867 struct block_device *bdev,
1868 struct bio **bio_ret,
1869 unsigned long max_pages,
Chris Masonf1885912008-04-09 16:28:12 -04001870 bio_end_io_t end_io_func,
Chris Masonc8b97812008-10-29 14:49:59 -04001871 int mirror_num,
1872 unsigned long prev_bio_flags,
1873 unsigned long bio_flags)
Chris Masond1310b22008-01-24 16:13:08 -05001874{
1875 int ret = 0;
1876 struct bio *bio;
1877 int nr;
Chris Masonc8b97812008-10-29 14:49:59 -04001878 int contig = 0;
1879 int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED;
1880 int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
Chris Mason5b050f02008-11-11 09:34:41 -05001881 size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE);
Chris Masond1310b22008-01-24 16:13:08 -05001882
1883 if (bio_ret && *bio_ret) {
1884 bio = *bio_ret;
Chris Masonc8b97812008-10-29 14:49:59 -04001885 if (old_compressed)
1886 contig = bio->bi_sector == sector;
1887 else
1888 contig = bio->bi_sector + (bio->bi_size >> 9) ==
1889 sector;
1890
1891 if (prev_bio_flags != bio_flags || !contig ||
Chris Mason239b14b2008-03-24 15:02:07 -04001892 (tree->ops && tree->ops->merge_bio_hook &&
Chris Masonc8b97812008-10-29 14:49:59 -04001893 tree->ops->merge_bio_hook(page, offset, page_size, bio,
1894 bio_flags)) ||
1895 bio_add_page(bio, page, page_size, offset) < page_size) {
1896 ret = submit_one_bio(rw, bio, mirror_num,
1897 prev_bio_flags);
Chris Masond1310b22008-01-24 16:13:08 -05001898 bio = NULL;
1899 } else {
1900 return 0;
1901 }
1902 }
Chris Masonc8b97812008-10-29 14:49:59 -04001903 if (this_compressed)
1904 nr = BIO_MAX_PAGES;
1905 else
1906 nr = bio_get_nr_vecs(bdev);
1907
Miao Xie88f794e2010-11-22 03:02:55 +00001908 bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
Tsutomu Itoh5df67082011-02-01 09:17:35 +00001909 if (!bio)
1910 return -ENOMEM;
Chris Mason70dec802008-01-29 09:59:12 -05001911
Chris Masonc8b97812008-10-29 14:49:59 -04001912 bio_add_page(bio, page, page_size, offset);
Chris Masond1310b22008-01-24 16:13:08 -05001913 bio->bi_end_io = end_io_func;
1914 bio->bi_private = tree;
Chris Mason70dec802008-01-29 09:59:12 -05001915
Chris Masond3977122009-01-05 21:25:51 -05001916 if (bio_ret)
Chris Masond1310b22008-01-24 16:13:08 -05001917 *bio_ret = bio;
Chris Masond3977122009-01-05 21:25:51 -05001918 else
Chris Masonc8b97812008-10-29 14:49:59 -04001919 ret = submit_one_bio(rw, bio, mirror_num, bio_flags);
Chris Masond1310b22008-01-24 16:13:08 -05001920
1921 return ret;
1922}
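
/*
 * Hedged arithmetic note on the contig test above: a bio starting at
 * sector 1000 that already holds 8192 bytes ends at sector
 * 1000 + (8192 >> 9) = 1016, so only a new page mapped to sector 1016
 * can be appended; a non-contiguous sector forces submit_one_bio() first.
 */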
1923
1924void set_page_extent_mapped(struct page *page)
1925{
1926 if (!PagePrivate(page)) {
1927 SetPagePrivate(page);
Chris Masond1310b22008-01-24 16:13:08 -05001928 page_cache_get(page);
Chris Mason6af118c2008-07-22 11:18:07 -04001929 set_page_private(page, EXTENT_PAGE_PRIVATE);
Chris Masond1310b22008-01-24 16:13:08 -05001930 }
1931}
1932
Christoph Hellwigb2950862008-12-02 09:54:17 -05001933static void set_page_extent_head(struct page *page, unsigned long len)
Chris Masond1310b22008-01-24 16:13:08 -05001934{
Chris Masoneb14ab82011-02-10 12:35:00 -05001935 WARN_ON(!PagePrivate(page));
Chris Masond1310b22008-01-24 16:13:08 -05001936 set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
1937}
1938
1939/*
 1940 * basic readpage implementation. Locked extent state structs are inserted
 1941 * into the tree and removed again when the IO is done (by the end_io
1942 * handlers)
1943 */
1944static int __extent_read_full_page(struct extent_io_tree *tree,
1945 struct page *page,
1946 get_extent_t *get_extent,
Chris Masonc8b97812008-10-29 14:49:59 -04001947 struct bio **bio, int mirror_num,
1948 unsigned long *bio_flags)
Chris Masond1310b22008-01-24 16:13:08 -05001949{
1950 struct inode *inode = page->mapping->host;
1951 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1952 u64 page_end = start + PAGE_CACHE_SIZE - 1;
1953 u64 end;
1954 u64 cur = start;
1955 u64 extent_offset;
1956 u64 last_byte = i_size_read(inode);
1957 u64 block_start;
1958 u64 cur_end;
1959 sector_t sector;
1960 struct extent_map *em;
1961 struct block_device *bdev;
Josef Bacik11c65dc2010-05-23 11:07:21 -04001962 struct btrfs_ordered_extent *ordered;
Chris Masond1310b22008-01-24 16:13:08 -05001963 int ret;
1964 int nr = 0;
David Sterba306e16c2011-04-19 14:29:38 +02001965 size_t pg_offset = 0;
Chris Masond1310b22008-01-24 16:13:08 -05001966 size_t iosize;
Chris Masonc8b97812008-10-29 14:49:59 -04001967 size_t disk_io_size;
Chris Masond1310b22008-01-24 16:13:08 -05001968 size_t blocksize = inode->i_sb->s_blocksize;
Chris Masonc8b97812008-10-29 14:49:59 -04001969 unsigned long this_bio_flag = 0;
Chris Masond1310b22008-01-24 16:13:08 -05001970
1971 set_page_extent_mapped(page);
1972
Dan Magenheimer90a887c2011-05-26 10:01:56 -06001973 if (!PageUptodate(page)) {
1974 if (cleancache_get_page(page) == 0) {
1975 BUG_ON(blocksize != PAGE_SIZE);
1976 goto out;
1977 }
1978 }
1979
Chris Masond1310b22008-01-24 16:13:08 -05001980 end = page_end;
Josef Bacik11c65dc2010-05-23 11:07:21 -04001981 while (1) {
1982 lock_extent(tree, start, end, GFP_NOFS);
1983 ordered = btrfs_lookup_ordered_extent(inode, start);
1984 if (!ordered)
1985 break;
1986 unlock_extent(tree, start, end, GFP_NOFS);
1987 btrfs_start_ordered_extent(inode, ordered, 1);
1988 btrfs_put_ordered_extent(ordered);
1989 }
Chris Masond1310b22008-01-24 16:13:08 -05001990
Chris Masonc8b97812008-10-29 14:49:59 -04001991 if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
1992 char *userpage;
1993 size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);
1994
1995 if (zero_offset) {
1996 iosize = PAGE_CACHE_SIZE - zero_offset;
1997 userpage = kmap_atomic(page, KM_USER0);
1998 memset(userpage + zero_offset, 0, iosize);
1999 flush_dcache_page(page);
2000 kunmap_atomic(userpage, KM_USER0);
2001 }
2002 }
Chris Masond1310b22008-01-24 16:13:08 -05002003 while (cur <= end) {
2004 if (cur >= last_byte) {
2005 char *userpage;
Arne Jansen507903b2011-04-06 10:02:20 +00002006 struct extent_state *cached = NULL;
2007
David Sterba306e16c2011-04-19 14:29:38 +02002008 iosize = PAGE_CACHE_SIZE - pg_offset;
Chris Masond1310b22008-01-24 16:13:08 -05002009 userpage = kmap_atomic(page, KM_USER0);
David Sterba306e16c2011-04-19 14:29:38 +02002010 memset(userpage + pg_offset, 0, iosize);
Chris Masond1310b22008-01-24 16:13:08 -05002011 flush_dcache_page(page);
2012 kunmap_atomic(userpage, KM_USER0);
2013 set_extent_uptodate(tree, cur, cur + iosize - 1,
Arne Jansen507903b2011-04-06 10:02:20 +00002014 &cached, GFP_NOFS);
2015 unlock_extent_cached(tree, cur, cur + iosize - 1,
2016 &cached, GFP_NOFS);
Chris Masond1310b22008-01-24 16:13:08 -05002017 break;
2018 }
David Sterba306e16c2011-04-19 14:29:38 +02002019 em = get_extent(inode, page, pg_offset, cur,
Chris Masond1310b22008-01-24 16:13:08 -05002020 end - cur + 1, 0);
David Sterbac7040052011-04-19 18:00:01 +02002021 if (IS_ERR_OR_NULL(em)) {
Chris Masond1310b22008-01-24 16:13:08 -05002022 SetPageError(page);
2023 unlock_extent(tree, cur, end, GFP_NOFS);
2024 break;
2025 }
Chris Masond1310b22008-01-24 16:13:08 -05002026 extent_offset = cur - em->start;
2027 BUG_ON(extent_map_end(em) <= cur);
2028 BUG_ON(end < cur);
2029
Li Zefan261507a02010-12-17 14:21:50 +08002030 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
Chris Masonc8b97812008-10-29 14:49:59 -04002031 this_bio_flag = EXTENT_BIO_COMPRESSED;
Li Zefan261507a02010-12-17 14:21:50 +08002032 extent_set_compress_type(&this_bio_flag,
2033 em->compress_type);
2034 }
Chris Masonc8b97812008-10-29 14:49:59 -04002035
Chris Masond1310b22008-01-24 16:13:08 -05002036 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2037 cur_end = min(extent_map_end(em) - 1, end);
2038 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
Chris Masonc8b97812008-10-29 14:49:59 -04002039 if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
2040 disk_io_size = em->block_len;
2041 sector = em->block_start >> 9;
2042 } else {
2043 sector = (em->block_start + extent_offset) >> 9;
2044 disk_io_size = iosize;
2045 }
Chris Masond1310b22008-01-24 16:13:08 -05002046 bdev = em->bdev;
2047 block_start = em->block_start;
Yan Zhengd899e052008-10-30 14:25:28 -04002048 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
2049 block_start = EXTENT_MAP_HOLE;
Chris Masond1310b22008-01-24 16:13:08 -05002050 free_extent_map(em);
2051 em = NULL;
2052
2053 /* we've found a hole, just zero and go on */
2054 if (block_start == EXTENT_MAP_HOLE) {
2055 char *userpage;
Arne Jansen507903b2011-04-06 10:02:20 +00002056 struct extent_state *cached = NULL;
2057
Chris Masond1310b22008-01-24 16:13:08 -05002058 userpage = kmap_atomic(page, KM_USER0);
David Sterba306e16c2011-04-19 14:29:38 +02002059 memset(userpage + pg_offset, 0, iosize);
Chris Masond1310b22008-01-24 16:13:08 -05002060 flush_dcache_page(page);
2061 kunmap_atomic(userpage, KM_USER0);
2062
2063 set_extent_uptodate(tree, cur, cur + iosize - 1,
Arne Jansen507903b2011-04-06 10:02:20 +00002064 &cached, GFP_NOFS);
2065 unlock_extent_cached(tree, cur, cur + iosize - 1,
2066 &cached, GFP_NOFS);
Chris Masond1310b22008-01-24 16:13:08 -05002067 cur = cur + iosize;
David Sterba306e16c2011-04-19 14:29:38 +02002068 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05002069 continue;
2070 }
 2071 /* the get_extent function already copied the data into the page */
Chris Mason9655d292009-09-02 15:22:30 -04002072 if (test_range_bit(tree, cur, cur_end,
2073 EXTENT_UPTODATE, 1, NULL)) {
Chris Masona1b32a52008-09-05 16:09:51 -04002074 check_page_uptodate(tree, page);
Chris Masond1310b22008-01-24 16:13:08 -05002075 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2076 cur = cur + iosize;
David Sterba306e16c2011-04-19 14:29:38 +02002077 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05002078 continue;
2079 }
Chris Mason70dec802008-01-29 09:59:12 -05002080 /* we have an inline extent but it didn't get marked up
2081 * to date. Error out
2082 */
2083 if (block_start == EXTENT_MAP_INLINE) {
2084 SetPageError(page);
2085 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2086 cur = cur + iosize;
David Sterba306e16c2011-04-19 14:29:38 +02002087 pg_offset += iosize;
Chris Mason70dec802008-01-29 09:59:12 -05002088 continue;
2089 }
Chris Masond1310b22008-01-24 16:13:08 -05002090
2091 ret = 0;
2092 if (tree->ops && tree->ops->readpage_io_hook) {
2093 ret = tree->ops->readpage_io_hook(page, cur,
2094 cur + iosize - 1);
2095 }
2096 if (!ret) {
Chris Mason89642222008-07-24 09:41:53 -04002097 unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
2098 pnr -= page->index;
Chris Masond1310b22008-01-24 16:13:08 -05002099 ret = submit_extent_page(READ, tree, page,
David Sterba306e16c2011-04-19 14:29:38 +02002100 sector, disk_io_size, pg_offset,
Chris Mason89642222008-07-24 09:41:53 -04002101 bdev, bio, pnr,
Chris Masonc8b97812008-10-29 14:49:59 -04002102 end_bio_extent_readpage, mirror_num,
2103 *bio_flags,
2104 this_bio_flag);
Chris Mason89642222008-07-24 09:41:53 -04002105 nr++;
Chris Masonc8b97812008-10-29 14:49:59 -04002106 *bio_flags = this_bio_flag;
Chris Masond1310b22008-01-24 16:13:08 -05002107 }
2108 if (ret)
2109 SetPageError(page);
2110 cur = cur + iosize;
David Sterba306e16c2011-04-19 14:29:38 +02002111 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05002112 }
Dan Magenheimer90a887c2011-05-26 10:01:56 -06002113out:
Chris Masond1310b22008-01-24 16:13:08 -05002114 if (!nr) {
2115 if (!PageError(page))
2116 SetPageUptodate(page);
2117 unlock_page(page);
2118 }
2119 return 0;
2120}
2121
2122int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
2123 get_extent_t *get_extent)
2124{
2125 struct bio *bio = NULL;
Chris Masonc8b97812008-10-29 14:49:59 -04002126 unsigned long bio_flags = 0;
Chris Masond1310b22008-01-24 16:13:08 -05002127 int ret;
2128
Chris Masonc8b97812008-10-29 14:49:59 -04002129 ret = __extent_read_full_page(tree, page, get_extent, &bio, 0,
2130 &bio_flags);
Chris Masond1310b22008-01-24 16:13:08 -05002131 if (bio)
liubo6b82ce82011-01-26 06:21:39 +00002132 ret = submit_one_bio(READ, bio, 0, bio_flags);
Chris Masond1310b22008-01-24 16:13:08 -05002133 return ret;
2134}
Chris Masond1310b22008-01-24 16:13:08 -05002135
Chris Mason11c83492009-04-20 15:50:09 -04002136static noinline void update_nr_written(struct page *page,
2137 struct writeback_control *wbc,
2138 unsigned long nr_written)
2139{
2140 wbc->nr_to_write -= nr_written;
2141 if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
2142 wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
2143 page->mapping->writeback_index = page->index + nr_written;
2144}
2145
Chris Masond1310b22008-01-24 16:13:08 -05002146/*
2147 * the writepage semantics are similar to regular writepage. extent
2148 * records are inserted to lock ranges in the tree, and as dirty areas
2149 * are found, they are marked writeback. Then the lock bits are removed
2150 * and the end_io handler clears the writeback ranges
2151 */
2152static int __extent_writepage(struct page *page, struct writeback_control *wbc,
2153 void *data)
2154{
2155 struct inode *inode = page->mapping->host;
2156 struct extent_page_data *epd = data;
2157 struct extent_io_tree *tree = epd->tree;
2158 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2159 u64 delalloc_start;
2160 u64 page_end = start + PAGE_CACHE_SIZE - 1;
2161 u64 end;
2162 u64 cur = start;
2163 u64 extent_offset;
2164 u64 last_byte = i_size_read(inode);
2165 u64 block_start;
2166 u64 iosize;
2167 sector_t sector;
Chris Mason2c64c532009-09-02 15:04:12 -04002168 struct extent_state *cached_state = NULL;
Chris Masond1310b22008-01-24 16:13:08 -05002169 struct extent_map *em;
2170 struct block_device *bdev;
2171 int ret;
2172 int nr = 0;
Chris Mason7f3c74f2008-07-18 12:01:11 -04002173 size_t pg_offset = 0;
Chris Masond1310b22008-01-24 16:13:08 -05002174 size_t blocksize;
2175 loff_t i_size = i_size_read(inode);
2176 unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
2177 u64 nr_delalloc;
2178 u64 delalloc_end;
Chris Masonc8b97812008-10-29 14:49:59 -04002179 int page_started;
2180 int compressed;
Chris Masonffbd5172009-04-20 15:50:09 -04002181 int write_flags;
Chris Mason771ed682008-11-06 22:02:51 -05002182 unsigned long nr_written = 0;
Chris Masond1310b22008-01-24 16:13:08 -05002183
Chris Masonffbd5172009-04-20 15:50:09 -04002184 if (wbc->sync_mode == WB_SYNC_ALL)
Jens Axboe721a9602011-03-09 11:56:30 +01002185 write_flags = WRITE_SYNC;
Chris Masonffbd5172009-04-20 15:50:09 -04002186 else
2187 write_flags = WRITE;
2188
liubo1abe9b82011-03-24 11:18:59 +00002189 trace___extent_writepage(page, inode, wbc);
2190
Chris Masond1310b22008-01-24 16:13:08 -05002191 WARN_ON(!PageLocked(page));
Chris Mason7f3c74f2008-07-18 12:01:11 -04002192 pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
Chris Mason211c17f2008-05-15 09:13:45 -04002193 if (page->index > end_index ||
Chris Mason7f3c74f2008-07-18 12:01:11 -04002194 (page->index == end_index && !pg_offset)) {
Chris Mason39be25c2008-11-10 11:50:50 -05002195 page->mapping->a_ops->invalidatepage(page, 0);
Chris Masond1310b22008-01-24 16:13:08 -05002196 unlock_page(page);
2197 return 0;
2198 }
2199
2200 if (page->index == end_index) {
2201 char *userpage;
2202
Chris Masond1310b22008-01-24 16:13:08 -05002203 userpage = kmap_atomic(page, KM_USER0);
Chris Mason7f3c74f2008-07-18 12:01:11 -04002204 memset(userpage + pg_offset, 0,
2205 PAGE_CACHE_SIZE - pg_offset);
Chris Masond1310b22008-01-24 16:13:08 -05002206 kunmap_atomic(userpage, KM_USER0);
Chris Mason211c17f2008-05-15 09:13:45 -04002207 flush_dcache_page(page);
Chris Masond1310b22008-01-24 16:13:08 -05002208 }
Chris Mason7f3c74f2008-07-18 12:01:11 -04002209 pg_offset = 0;
Chris Masond1310b22008-01-24 16:13:08 -05002210
2211 set_page_extent_mapped(page);
2212
2213 delalloc_start = start;
2214 delalloc_end = 0;
Chris Masonc8b97812008-10-29 14:49:59 -04002215 page_started = 0;
Chris Mason771ed682008-11-06 22:02:51 -05002216 if (!epd->extent_locked) {
Chris Masonf85d7d6c2009-09-18 16:03:16 -04002217 u64 delalloc_to_write = 0;
Chris Mason11c83492009-04-20 15:50:09 -04002218 /*
2219 * make sure the wbc mapping index is at least updated
2220 * to this page.
2221 */
2222 update_nr_written(page, wbc, 0);
2223
Chris Masond3977122009-01-05 21:25:51 -05002224 while (delalloc_end < page_end) {
Chris Mason771ed682008-11-06 22:02:51 -05002225 nr_delalloc = find_lock_delalloc_range(inode, tree,
Chris Masonc8b97812008-10-29 14:49:59 -04002226 page,
2227 &delalloc_start,
Chris Masond1310b22008-01-24 16:13:08 -05002228 &delalloc_end,
2229 128 * 1024 * 1024);
Chris Mason771ed682008-11-06 22:02:51 -05002230 if (nr_delalloc == 0) {
2231 delalloc_start = delalloc_end + 1;
2232 continue;
2233 }
2234 tree->ops->fill_delalloc(inode, page, delalloc_start,
2235 delalloc_end, &page_started,
2236 &nr_written);
Chris Masonf85d7d6c2009-09-18 16:03:16 -04002237 /*
2238 * delalloc_end is already one less than the total
2239 * length, so we don't subtract one from
2240 * PAGE_CACHE_SIZE
2241 */
2242 delalloc_to_write += (delalloc_end - delalloc_start +
2243 PAGE_CACHE_SIZE) >>
2244 PAGE_CACHE_SHIFT;
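			/*
			 * Hedged worked example: with 4K pages, a range
			 * covering exactly one page has delalloc_end -
			 * delalloc_start == 4095, and (4095 + 4096) >> 12
			 * == 1 page, as intended.
			 */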
Chris Masond1310b22008-01-24 16:13:08 -05002245 delalloc_start = delalloc_end + 1;
Chris Masond1310b22008-01-24 16:13:08 -05002246 }
Chris Masonf85d7d6c2009-09-18 16:03:16 -04002247 if (wbc->nr_to_write < delalloc_to_write) {
2248 int thresh = 8192;
2249
2250 if (delalloc_to_write < thresh * 2)
2251 thresh = delalloc_to_write;
2252 wbc->nr_to_write = min_t(u64, delalloc_to_write,
2253 thresh);
2254 }
Chris Masonc8b97812008-10-29 14:49:59 -04002255
Chris Mason771ed682008-11-06 22:02:51 -05002256 /* did the fill delalloc function already unlock and start
2257 * the IO?
2258 */
2259 if (page_started) {
2260 ret = 0;
Chris Mason11c83492009-04-20 15:50:09 -04002261 /*
2262 * we've unlocked the page, so we can't update
2263 * the mapping's writeback index, just update
2264 * nr_to_write.
2265 */
2266 wbc->nr_to_write -= nr_written;
2267 goto done_unlocked;
Chris Mason771ed682008-11-06 22:02:51 -05002268 }
Chris Masonc8b97812008-10-29 14:49:59 -04002269 }
Chris Mason247e7432008-07-17 12:53:51 -04002270 if (tree->ops && tree->ops->writepage_start_hook) {
Chris Masonc8b97812008-10-29 14:49:59 -04002271 ret = tree->ops->writepage_start_hook(page, start,
2272 page_end);
Chris Mason247e7432008-07-17 12:53:51 -04002273 if (ret == -EAGAIN) {
Chris Mason247e7432008-07-17 12:53:51 -04002274 redirty_page_for_writepage(wbc, page);
Chris Mason11c83492009-04-20 15:50:09 -04002275 update_nr_written(page, wbc, nr_written);
Chris Mason247e7432008-07-17 12:53:51 -04002276 unlock_page(page);
Chris Mason771ed682008-11-06 22:02:51 -05002277 ret = 0;
Chris Mason11c83492009-04-20 15:50:09 -04002278 goto done_unlocked;
Chris Mason247e7432008-07-17 12:53:51 -04002279 }
2280 }
2281
Chris Mason11c83492009-04-20 15:50:09 -04002282 /*
2283 * we don't want to touch the inode after unlocking the page,
2284 * so we update the mapping writeback index now
2285 */
2286 update_nr_written(page, wbc, nr_written + 1);
Chris Mason771ed682008-11-06 22:02:51 -05002287
Chris Masond1310b22008-01-24 16:13:08 -05002288 end = page_end;
Chris Masond1310b22008-01-24 16:13:08 -05002289 if (last_byte <= start) {
Chris Masone6dcd2d2008-07-17 12:53:50 -04002290 if (tree->ops && tree->ops->writepage_end_io_hook)
2291 tree->ops->writepage_end_io_hook(page, start,
2292 page_end, NULL, 1);
Chris Masond1310b22008-01-24 16:13:08 -05002293 goto done;
2294 }
2295
Chris Masond1310b22008-01-24 16:13:08 -05002296 blocksize = inode->i_sb->s_blocksize;
2297
2298 while (cur <= end) {
2299 if (cur >= last_byte) {
Chris Masone6dcd2d2008-07-17 12:53:50 -04002300 if (tree->ops && tree->ops->writepage_end_io_hook)
2301 tree->ops->writepage_end_io_hook(page, cur,
2302 page_end, NULL, 1);
Chris Masond1310b22008-01-24 16:13:08 -05002303 break;
2304 }
Chris Mason7f3c74f2008-07-18 12:01:11 -04002305 em = epd->get_extent(inode, page, pg_offset, cur,
Chris Masond1310b22008-01-24 16:13:08 -05002306 end - cur + 1, 1);
David Sterbac7040052011-04-19 18:00:01 +02002307 if (IS_ERR_OR_NULL(em)) {
Chris Masond1310b22008-01-24 16:13:08 -05002308 SetPageError(page);
2309 break;
2310 }
2311
2312 extent_offset = cur - em->start;
2313 BUG_ON(extent_map_end(em) <= cur);
2314 BUG_ON(end < cur);
2315 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2316 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2317 sector = (em->block_start + extent_offset) >> 9;
2318 bdev = em->bdev;
2319 block_start = em->block_start;
Chris Masonc8b97812008-10-29 14:49:59 -04002320 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
Chris Masond1310b22008-01-24 16:13:08 -05002321 free_extent_map(em);
2322 em = NULL;
2323
Chris Masonc8b97812008-10-29 14:49:59 -04002324 /*
2325 * compressed and inline extents are written through other
2326 * paths in the FS
2327 */
2328 if (compressed || block_start == EXTENT_MAP_HOLE ||
Chris Masond1310b22008-01-24 16:13:08 -05002329 block_start == EXTENT_MAP_INLINE) {
Chris Masonc8b97812008-10-29 14:49:59 -04002330 /*
2331 * end_io notification does not happen here for
2332 * compressed extents
2333 */
2334 if (!compressed && tree->ops &&
2335 tree->ops->writepage_end_io_hook)
Chris Masone6dcd2d2008-07-17 12:53:50 -04002336 tree->ops->writepage_end_io_hook(page, cur,
2337 cur + iosize - 1,
2338 NULL, 1);
Chris Masonc8b97812008-10-29 14:49:59 -04002339 else if (compressed) {
2340 /* we don't want to end_page_writeback on
2341 * a compressed extent. this happens
2342 * elsewhere
2343 */
2344 nr++;
2345 }
2346
2347 cur += iosize;
Chris Mason7f3c74f2008-07-18 12:01:11 -04002348 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05002349 continue;
2350 }
Chris Masond1310b22008-01-24 16:13:08 -05002351 /* leave this out until we have a page_mkwrite call */
2352 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
Chris Mason9655d292009-09-02 15:22:30 -04002353 EXTENT_DIRTY, 0, NULL)) {
Chris Masond1310b22008-01-24 16:13:08 -05002354 cur = cur + iosize;
Chris Mason7f3c74f2008-07-18 12:01:11 -04002355 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05002356 continue;
2357 }
Chris Masonc8b97812008-10-29 14:49:59 -04002358
Chris Masond1310b22008-01-24 16:13:08 -05002359 if (tree->ops && tree->ops->writepage_io_hook) {
2360 ret = tree->ops->writepage_io_hook(page, cur,
2361 cur + iosize - 1);
2362 } else {
2363 ret = 0;
2364 }
Chris Mason1259ab72008-05-12 13:39:03 -04002365 if (ret) {
Chris Masond1310b22008-01-24 16:13:08 -05002366 SetPageError(page);
Chris Mason1259ab72008-05-12 13:39:03 -04002367 } else {
Chris Masond1310b22008-01-24 16:13:08 -05002368 unsigned long max_nr = end_index + 1;
Chris Mason7f3c74f2008-07-18 12:01:11 -04002369
Chris Masond1310b22008-01-24 16:13:08 -05002370 set_range_writeback(tree, cur, cur + iosize - 1);
2371 if (!PageWriteback(page)) {
Chris Masond3977122009-01-05 21:25:51 -05002372 printk(KERN_ERR "btrfs warning page %lu not "
2373 "writeback, cur %llu end %llu\n",
2374 page->index, (unsigned long long)cur,
Chris Masond1310b22008-01-24 16:13:08 -05002375 (unsigned long long)end);
2376 }
2377
Chris Masonffbd5172009-04-20 15:50:09 -04002378 ret = submit_extent_page(write_flags, tree, page,
2379 sector, iosize, pg_offset,
2380 bdev, &epd->bio, max_nr,
Chris Masonc8b97812008-10-29 14:49:59 -04002381 end_bio_extent_writepage,
2382 0, 0, 0);
Chris Masond1310b22008-01-24 16:13:08 -05002383 if (ret)
2384 SetPageError(page);
2385 }
2386 cur = cur + iosize;
Chris Mason7f3c74f2008-07-18 12:01:11 -04002387 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05002388 nr++;
2389 }
2390done:
2391 if (nr == 0) {
2392 /* make sure the mapping tag for page dirty gets cleared */
2393 set_page_writeback(page);
2394 end_page_writeback(page);
2395 }
Chris Masond1310b22008-01-24 16:13:08 -05002396 unlock_page(page);
Chris Mason771ed682008-11-06 22:02:51 -05002397
Chris Mason11c83492009-04-20 15:50:09 -04002398done_unlocked:
2399
Chris Mason2c64c532009-09-02 15:04:12 -04002400 /* drop our reference on any cached states */
2401 free_extent_state(cached_state);
Chris Masond1310b22008-01-24 16:13:08 -05002402 return 0;
2403}
2404
Chris Masond1310b22008-01-24 16:13:08 -05002405/**
Chris Mason4bef0842008-09-08 11:18:08 -04002406 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
Chris Masond1310b22008-01-24 16:13:08 -05002407 * @mapping: address space structure to write
2408 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2409 * @writepage: function called for each page
2410 * @data: data passed to writepage function
2411 *
2412 * If a page is already under I/O, write_cache_pages() skips it, even
2413 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
2414 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
2415 * and msync() need to guarantee that all the data which was dirty at the time
2416 * the call was made get new I/O started against them. If wbc->sync_mode is
2417 * WB_SYNC_ALL then we were called for data integrity and we must wait for
2418 * existing IO to complete.
2419 */
Christoph Hellwigb2950862008-12-02 09:54:17 -05002420static int extent_write_cache_pages(struct extent_io_tree *tree,
Chris Mason4bef0842008-09-08 11:18:08 -04002421 struct address_space *mapping,
2422 struct writeback_control *wbc,
Chris Masond2c3f4f2008-11-19 12:44:22 -05002423 writepage_t writepage, void *data,
2424 void (*flush_fn)(void *))
Chris Masond1310b22008-01-24 16:13:08 -05002425{
Chris Masond1310b22008-01-24 16:13:08 -05002426 int ret = 0;
2427 int done = 0;
Chris Masonf85d7d6c2009-09-18 16:03:16 -04002428 int nr_to_write_done = 0;
Chris Masond1310b22008-01-24 16:13:08 -05002429 struct pagevec pvec;
2430 int nr_pages;
2431 pgoff_t index;
2432 pgoff_t end; /* Inclusive */
2433 int scanned = 0;
Josef Bacikf7aaa062011-07-15 21:26:38 +00002434 int tag;
Chris Masond1310b22008-01-24 16:13:08 -05002435
Chris Masond1310b22008-01-24 16:13:08 -05002436 pagevec_init(&pvec, 0);
2437 if (wbc->range_cyclic) {
2438 index = mapping->writeback_index; /* Start from prev offset */
2439 end = -1;
2440 } else {
2441 index = wbc->range_start >> PAGE_CACHE_SHIFT;
2442 end = wbc->range_end >> PAGE_CACHE_SHIFT;
Chris Masond1310b22008-01-24 16:13:08 -05002443 scanned = 1;
2444 }
Josef Bacikf7aaa062011-07-15 21:26:38 +00002445 if (wbc->sync_mode == WB_SYNC_ALL)
2446 tag = PAGECACHE_TAG_TOWRITE;
2447 else
2448 tag = PAGECACHE_TAG_DIRTY;
Chris Masond1310b22008-01-24 16:13:08 -05002449retry:
Josef Bacikf7aaa062011-07-15 21:26:38 +00002450 if (wbc->sync_mode == WB_SYNC_ALL)
2451 tag_pages_for_writeback(mapping, index, end);
Chris Masonf85d7d6c2009-09-18 16:03:16 -04002452 while (!done && !nr_to_write_done && (index <= end) &&
Josef Bacikf7aaa062011-07-15 21:26:38 +00002453 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
2454 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
Chris Masond1310b22008-01-24 16:13:08 -05002455 unsigned i;
2456
2457 scanned = 1;
2458 for (i = 0; i < nr_pages; i++) {
2459 struct page *page = pvec.pages[i];
2460
2461 /*
2462 * At this point we hold neither mapping->tree_lock nor
2463 * lock on the page itself: the page may be truncated or
2464 * invalidated (changing page->mapping to NULL), or even
2465 * swizzled back from swapper_space to tmpfs file
2466 * mapping
2467 */
Chris Mason4bef0842008-09-08 11:18:08 -04002468 if (tree->ops && tree->ops->write_cache_pages_lock_hook)
2469 tree->ops->write_cache_pages_lock_hook(page);
2470 else
2471 lock_page(page);
Chris Masond1310b22008-01-24 16:13:08 -05002472
2473 if (unlikely(page->mapping != mapping)) {
2474 unlock_page(page);
2475 continue;
2476 }
2477
2478 if (!wbc->range_cyclic && page->index > end) {
2479 done = 1;
2480 unlock_page(page);
2481 continue;
2482 }
2483
Chris Masond2c3f4f2008-11-19 12:44:22 -05002484 if (wbc->sync_mode != WB_SYNC_NONE) {
Chris Mason0e6bd952008-11-20 10:46:35 -05002485 if (PageWriteback(page))
2486 flush_fn(data);
Chris Masond1310b22008-01-24 16:13:08 -05002487 wait_on_page_writeback(page);
Chris Masond2c3f4f2008-11-19 12:44:22 -05002488 }
Chris Masond1310b22008-01-24 16:13:08 -05002489
2490 if (PageWriteback(page) ||
2491 !clear_page_dirty_for_io(page)) {
2492 unlock_page(page);
2493 continue;
2494 }
2495
2496 ret = (*writepage)(page, wbc, data);
2497
2498 if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
2499 unlock_page(page);
2500 ret = 0;
2501 }
Chris Masonf85d7d6c2009-09-18 16:03:16 -04002502 if (ret)
Chris Masond1310b22008-01-24 16:13:08 -05002503 done = 1;
Chris Masonf85d7d6c2009-09-18 16:03:16 -04002504
2505 /*
2506 * the filesystem may choose to bump up nr_to_write.
2507 * We have to make sure to honor the new nr_to_write
2508 * at any time
2509 */
2510 nr_to_write_done = wbc->nr_to_write <= 0;
Chris Masond1310b22008-01-24 16:13:08 -05002511 }
2512 pagevec_release(&pvec);
2513 cond_resched();
2514 }
2515 if (!scanned && !done) {
2516 /*
2517 * We hit the last page and there is more work to be done: wrap
2518 * back to the start of the file
2519 */
2520 scanned = 1;
2521 index = 0;
2522 goto retry;
2523 }
Chris Masond1310b22008-01-24 16:13:08 -05002524 return ret;
2525}
Chris Masond1310b22008-01-24 16:13:08 -05002526
Chris Masonffbd5172009-04-20 15:50:09 -04002527static void flush_epd_write_bio(struct extent_page_data *epd)
2528{
2529 if (epd->bio) {
2530 if (epd->sync_io)
2531 submit_one_bio(WRITE_SYNC, epd->bio, 0, 0);
2532 else
2533 submit_one_bio(WRITE, epd->bio, 0, 0);
2534 epd->bio = NULL;
2535 }
2536}
2537
Chris Masond2c3f4f2008-11-19 12:44:22 -05002538static noinline void flush_write_bio(void *data)
2539{
2540 struct extent_page_data *epd = data;
Chris Masonffbd5172009-04-20 15:50:09 -04002541 flush_epd_write_bio(epd);
Chris Masond2c3f4f2008-11-19 12:44:22 -05002542}
2543
Chris Masond1310b22008-01-24 16:13:08 -05002544int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
2545 get_extent_t *get_extent,
2546 struct writeback_control *wbc)
2547{
2548 int ret;
2549 struct address_space *mapping = page->mapping;
2550 struct extent_page_data epd = {
2551 .bio = NULL,
2552 .tree = tree,
2553 .get_extent = get_extent,
Chris Mason771ed682008-11-06 22:02:51 -05002554 .extent_locked = 0,
Chris Masonffbd5172009-04-20 15:50:09 -04002555 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
Chris Masond1310b22008-01-24 16:13:08 -05002556 };
2557 struct writeback_control wbc_writepages = {
Chris Masond313d7a2009-04-20 15:50:09 -04002558 .sync_mode = wbc->sync_mode,
Chris Masond1310b22008-01-24 16:13:08 -05002559 .nr_to_write = 64,
2560 .range_start = page_offset(page) + PAGE_CACHE_SIZE,
2561 .range_end = (loff_t)-1,
2562 };
2563
Chris Masond1310b22008-01-24 16:13:08 -05002564 ret = __extent_writepage(page, wbc, &epd);
2565
Chris Mason4bef0842008-09-08 11:18:08 -04002566 extent_write_cache_pages(tree, mapping, &wbc_writepages,
Chris Masond2c3f4f2008-11-19 12:44:22 -05002567 __extent_writepage, &epd, flush_write_bio);
Chris Masonffbd5172009-04-20 15:50:09 -04002568 flush_epd_write_bio(&epd);
Chris Masond1310b22008-01-24 16:13:08 -05002569 return ret;
2570}
Chris Masond1310b22008-01-24 16:13:08 -05002571
Chris Mason771ed682008-11-06 22:02:51 -05002572int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
2573 u64 start, u64 end, get_extent_t *get_extent,
2574 int mode)
2575{
2576 int ret = 0;
2577 struct address_space *mapping = inode->i_mapping;
2578 struct page *page;
2579 unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >>
2580 PAGE_CACHE_SHIFT;
2581
2582 struct extent_page_data epd = {
2583 .bio = NULL,
2584 .tree = tree,
2585 .get_extent = get_extent,
2586 .extent_locked = 1,
Chris Masonffbd5172009-04-20 15:50:09 -04002587 .sync_io = mode == WB_SYNC_ALL,
Chris Mason771ed682008-11-06 22:02:51 -05002588 };
2589 struct writeback_control wbc_writepages = {
Chris Mason771ed682008-11-06 22:02:51 -05002590 .sync_mode = mode,
Chris Mason771ed682008-11-06 22:02:51 -05002591 .nr_to_write = nr_pages * 2,
2592 .range_start = start,
2593 .range_end = end + 1,
2594 };
2595
Chris Masond3977122009-01-05 21:25:51 -05002596 while (start <= end) {
Chris Mason771ed682008-11-06 22:02:51 -05002597 page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
2598 if (clear_page_dirty_for_io(page))
2599 ret = __extent_writepage(page, &wbc_writepages, &epd);
2600 else {
2601 if (tree->ops && tree->ops->writepage_end_io_hook)
2602 tree->ops->writepage_end_io_hook(page, start,
2603 start + PAGE_CACHE_SIZE - 1,
2604 NULL, 1);
2605 unlock_page(page);
2606 }
2607 page_cache_release(page);
2608 start += PAGE_CACHE_SIZE;
2609 }
2610
Chris Masonffbd5172009-04-20 15:50:09 -04002611 flush_epd_write_bio(&epd);
Chris Mason771ed682008-11-06 22:02:51 -05002612 return ret;
2613}
Chris Masond1310b22008-01-24 16:13:08 -05002614
2615int extent_writepages(struct extent_io_tree *tree,
2616 struct address_space *mapping,
2617 get_extent_t *get_extent,
2618 struct writeback_control *wbc)
2619{
2620 int ret = 0;
2621 struct extent_page_data epd = {
2622 .bio = NULL,
2623 .tree = tree,
2624 .get_extent = get_extent,
Chris Mason771ed682008-11-06 22:02:51 -05002625 .extent_locked = 0,
Chris Masonffbd5172009-04-20 15:50:09 -04002626 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
Chris Masond1310b22008-01-24 16:13:08 -05002627 };
2628
Chris Mason4bef0842008-09-08 11:18:08 -04002629 ret = extent_write_cache_pages(tree, mapping, wbc,
Chris Masond2c3f4f2008-11-19 12:44:22 -05002630 __extent_writepage, &epd,
2631 flush_write_bio);
Chris Masonffbd5172009-04-20 15:50:09 -04002632 flush_epd_write_bio(&epd);
Chris Masond1310b22008-01-24 16:13:08 -05002633 return ret;
2634}
Chris Masond1310b22008-01-24 16:13:08 -05002635
2636int extent_readpages(struct extent_io_tree *tree,
2637 struct address_space *mapping,
2638 struct list_head *pages, unsigned nr_pages,
2639 get_extent_t get_extent)
2640{
2641 struct bio *bio = NULL;
2642 unsigned page_idx;
Chris Masonc8b97812008-10-29 14:49:59 -04002643 unsigned long bio_flags = 0;
Chris Masond1310b22008-01-24 16:13:08 -05002644
Chris Masond1310b22008-01-24 16:13:08 -05002645 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
2646 struct page *page = list_entry(pages->prev, struct page, lru);
2647
2648 prefetchw(&page->flags);
2649 list_del(&page->lru);
Nick Piggin28ecb6092010-03-17 13:31:04 +00002650 if (!add_to_page_cache_lru(page, mapping,
Itaru Kitayama43e817a2011-04-25 19:43:51 -04002651 page->index, GFP_NOFS)) {
Chris Masonf1885912008-04-09 16:28:12 -04002652 __extent_read_full_page(tree, page, get_extent,
Chris Masonc8b97812008-10-29 14:49:59 -04002653 &bio, 0, &bio_flags);
Chris Masond1310b22008-01-24 16:13:08 -05002654 }
2655 page_cache_release(page);
2656 }
Chris Masond1310b22008-01-24 16:13:08 -05002657 BUG_ON(!list_empty(pages));
2658 if (bio)
Chris Masonc8b97812008-10-29 14:49:59 -04002659 submit_one_bio(READ, bio, 0, bio_flags);
Chris Masond1310b22008-01-24 16:13:08 -05002660 return 0;
2661}
Chris Masond1310b22008-01-24 16:13:08 -05002662
2663/*
 2664 * basic invalidatepage code. This waits on any locked or writeback
2665 * ranges corresponding to the page, and then deletes any extent state
2666 * records from the tree
2667 */
2668int extent_invalidatepage(struct extent_io_tree *tree,
2669 struct page *page, unsigned long offset)
2670{
Josef Bacik2ac55d42010-02-03 19:33:23 +00002671 struct extent_state *cached_state = NULL;
Chris Masond1310b22008-01-24 16:13:08 -05002672 u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
2673 u64 end = start + PAGE_CACHE_SIZE - 1;
2674 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
2675
Chris Masond3977122009-01-05 21:25:51 -05002676 start += (offset + blocksize - 1) & ~(blocksize - 1);
Chris Masond1310b22008-01-24 16:13:08 -05002677 if (start > end)
2678 return 0;
2679
Josef Bacik2ac55d42010-02-03 19:33:23 +00002680 lock_extent_bits(tree, start, end, 0, &cached_state, GFP_NOFS);
Chris Mason1edbb732009-09-02 13:24:36 -04002681 wait_on_page_writeback(page);
Chris Masond1310b22008-01-24 16:13:08 -05002682 clear_extent_bit(tree, start, end,
Josef Bacik32c00af2009-10-08 13:34:05 -04002683 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
2684 EXTENT_DO_ACCOUNTING,
Josef Bacik2ac55d42010-02-03 19:33:23 +00002685 1, 1, &cached_state, GFP_NOFS);
Chris Masond1310b22008-01-24 16:13:08 -05002686 return 0;
2687}
Chris Masond1310b22008-01-24 16:13:08 -05002688
2689/*
Chris Mason7b13b7b2008-04-18 10:29:50 -04002690 * a helper for releasepage, this tests for areas of the page that
2691 * are locked or under IO and drops the related state bits if it is safe
2692 * to drop the page.
2693 */
2694int try_release_extent_state(struct extent_map_tree *map,
2695 struct extent_io_tree *tree, struct page *page,
2696 gfp_t mask)
2697{
2698 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2699 u64 end = start + PAGE_CACHE_SIZE - 1;
2700 int ret = 1;
2701
Chris Mason211f90e2008-07-18 11:56:15 -04002702 if (test_range_bit(tree, start, end,
Chris Mason8b62b722009-09-02 16:53:46 -04002703 EXTENT_IOBITS, 0, NULL))
Chris Mason7b13b7b2008-04-18 10:29:50 -04002704 ret = 0;
2705 else {
2706 if ((mask & GFP_NOFS) == GFP_NOFS)
2707 mask = GFP_NOFS;
Chris Mason11ef1602009-09-23 20:28:46 -04002708 /*
2709 * at this point we can safely clear everything except the
2710 * locked bit and the nodatasum bit
2711 */
Chris Masone3f24cc2011-02-14 12:52:08 -05002712 ret = clear_extent_bit(tree, start, end,
Chris Mason11ef1602009-09-23 20:28:46 -04002713 ~(EXTENT_LOCKED | EXTENT_NODATASUM),
2714 0, 0, NULL, mask);
Chris Masone3f24cc2011-02-14 12:52:08 -05002715
2716 /* if clear_extent_bit failed for enomem reasons,
2717 * we can't allow the release to continue.
2718 */
2719 if (ret < 0)
2720 ret = 0;
2721 else
2722 ret = 1;
Chris Mason7b13b7b2008-04-18 10:29:50 -04002723 }
2724 return ret;
2725}

/*
 * a helper for releasepage.  As long as there are no locked extents
 * in the range corresponding to the page, both state records and extent
 * map records are removed
 */
int try_release_extent_mapping(struct extent_map_tree *map,
			       struct extent_io_tree *tree, struct page *page,
			       gfp_t mask)
{
	struct extent_map *em;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;

	if ((mask & __GFP_WAIT) &&
	    page->mapping->host->i_size > 16 * 1024 * 1024) {
		u64 len;
		while (start <= end) {
			len = end - start + 1;
			write_lock(&map->lock);
			em = lookup_extent_mapping(map, start, len);
			if (IS_ERR_OR_NULL(em)) {
				write_unlock(&map->lock);
				break;
			}
			if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
			    em->start != start) {
				write_unlock(&map->lock);
				free_extent_map(em);
				break;
			}
			if (!test_range_bit(tree, em->start,
					    extent_map_end(em) - 1,
					    EXTENT_LOCKED | EXTENT_WRITEBACK,
					    0, NULL)) {
				remove_extent_mapping(map, em);
				/* once for the rb tree */
				free_extent_map(em);
			}
			start = extent_map_end(em);
			write_unlock(&map->lock);

			/* once for us */
			free_extent_map(em);
		}
	}
	return try_release_extent_state(map, tree, page, mask);
}
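
/*
 * Usage sketch (illustrative, names assumed): a ->releasepage hook would
 * refuse to release a page that is still dirty or under writeback, and
 * otherwise let the two helpers above decide whether the extent state
 * and mapping records can be dropped along with the page.
 */
#if 0	/* example only */
static int btrfs_releasepage_example(struct page *page, gfp_t gfp_flags)
{
	struct inode *inode = page->mapping->host;

	if (PageWriteback(page) || PageDirty(page))
		return 0;
	return try_release_extent_mapping(&BTRFS_I(inode)->extent_tree,
					  &BTRFS_I(inode)->io_tree,
					  page, gfp_flags);
}
#endif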
Chris Masond1310b22008-01-24 16:13:08 -05002774
Chris Masonec29ed52011-02-23 16:23:20 -05002775/*
2776 * helper function for fiemap, which doesn't want to see any holes.
2777 * This maps until we find something past 'last'
2778 */
2779static struct extent_map *get_extent_skip_holes(struct inode *inode,
2780 u64 offset,
2781 u64 last,
2782 get_extent_t *get_extent)
2783{
2784 u64 sectorsize = BTRFS_I(inode)->root->sectorsize;
2785 struct extent_map *em;
2786 u64 len;
2787
2788 if (offset >= last)
2789 return NULL;
2790
2791 while(1) {
2792 len = last - offset;
2793 if (len == 0)
2794 break;
2795 len = (len + sectorsize - 1) & ~(sectorsize - 1);
2796 em = get_extent(inode, NULL, 0, offset, len, 0);
David Sterbac7040052011-04-19 18:00:01 +02002797 if (IS_ERR_OR_NULL(em))
Chris Masonec29ed52011-02-23 16:23:20 -05002798 return em;
2799
2800 /* if this isn't a hole return it */
2801 if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) &&
2802 em->block_start != EXTENT_MAP_HOLE) {
2803 return em;
2804 }
2805
2806 /* this is a hole, advance to the next extent */
2807 offset = extent_map_end(em);
2808 free_extent_map(em);
2809 if (offset >= last)
2810 break;
2811 }
2812 return NULL;
2813}

int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		__u64 start, __u64 len, get_extent_t *get_extent)
{
	int ret = 0;
	u64 off = start;
	u64 max = start + len;
	u32 flags = 0;
	u32 found_type;
	u64 last;
	u64 last_for_get_extent = 0;
	u64 disko = 0;
	u64 isize = i_size_read(inode);
	struct btrfs_key found_key;
	struct extent_map *em = NULL;
	struct extent_state *cached_state = NULL;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *item;
	int end = 0;
	u64 em_start = 0;
	u64 em_len = 0;
	u64 em_end = 0;
	unsigned long emflags;

	if (len == 0)
		return -EINVAL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->leave_spinning = 1;

	/*
	 * lookup the last file extent.  We're not using i_size here
	 * because there might be preallocation past i_size
	 */
	ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root,
				       path, btrfs_ino(inode), -1, 0);
	if (ret < 0) {
		btrfs_free_path(path);
		return ret;
	}
	WARN_ON(!ret);
	path->slots[0]--;
	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_file_extent_item);
	btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
	found_type = btrfs_key_type(&found_key);

	/* No extents, but there might be delalloc bits */
	if (found_key.objectid != btrfs_ino(inode) ||
	    found_type != BTRFS_EXTENT_DATA_KEY) {
		/* have to trust i_size as the end */
		last = (u64)-1;
		last_for_get_extent = isize;
	} else {
		/*
		 * remember the start of the last extent.  There are a
		 * bunch of different factors that go into the length of
		 * the extent, so it's much less complex to remember where
		 * it started
		 */
		last = found_key.offset;
		last_for_get_extent = last + 1;
	}
	btrfs_free_path(path);

	/*
	 * we might have some extents allocated but more delalloc past those
	 * extents.  so, we trust isize unless the start of the last extent
	 * is beyond isize
	 */
	if (last < isize) {
		last = (u64)-1;
		last_for_get_extent = isize;
	}

	lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
			 &cached_state, GFP_NOFS);

	em = get_extent_skip_holes(inode, off, last_for_get_extent,
				   get_extent);
	if (!em)
		goto out;
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto out;
	}

	while (!end) {
		u64 offset_in_extent;

		/* break if the extent we found is outside the range */
		if (em->start >= max || extent_map_end(em) < off)
			break;

		/*
		 * get_extent may return an extent that starts before our
		 * requested range.  We have to make sure the ranges
		 * we return to fiemap always move forward and don't
		 * overlap, so adjust the offsets here
		 */
		em_start = max(em->start, off);

		/*
		 * record the offset from the start of the extent
		 * for adjusting the disk offset below
		 */
		offset_in_extent = em_start - em->start;
		em_end = extent_map_end(em);
		em_len = em_end - em_start;
		emflags = em->flags;
		disko = 0;
		flags = 0;

		/*
		 * bump off for our next call to get_extent
		 */
		off = extent_map_end(em);
		if (off >= max)
			end = 1;

		if (em->block_start == EXTENT_MAP_LAST_BYTE) {
			end = 1;
			flags |= FIEMAP_EXTENT_LAST;
		} else if (em->block_start == EXTENT_MAP_INLINE) {
			flags |= (FIEMAP_EXTENT_DATA_INLINE |
				  FIEMAP_EXTENT_NOT_ALIGNED);
		} else if (em->block_start == EXTENT_MAP_DELALLOC) {
			flags |= (FIEMAP_EXTENT_DELALLOC |
				  FIEMAP_EXTENT_UNKNOWN);
		} else {
			disko = em->block_start + offset_in_extent;
		}
		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
			flags |= FIEMAP_EXTENT_ENCODED;

		free_extent_map(em);
		em = NULL;
		if ((em_start >= last) || em_len == (u64)-1 ||
		   (last == (u64)-1 && isize <= em_end)) {
			flags |= FIEMAP_EXTENT_LAST;
			end = 1;
		}

		/* now scan forward to see if this is really the last extent. */
		em = get_extent_skip_holes(inode, off, last_for_get_extent,
					   get_extent);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out;
		}
		if (!em) {
			flags |= FIEMAP_EXTENT_LAST;
			end = 1;
		}
		ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
					      em_len, flags);
		if (ret)
			goto out_free;
	}
out_free:
	free_extent_map(em);
out:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len,
			     &cached_state, GFP_NOFS);
	return ret;
}
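
/*
 * Caller sketch (illustrative, names assumed): an inode's ->fiemap
 * operation can be a thin wrapper that only supplies the btrfs extent
 * mapping callback.
 */
#if 0	/* example only */
static int btrfs_fiemap_example(struct inode *inode,
				struct fiemap_extent_info *fieinfo,
				__u64 start, __u64 len)
{
	return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent);
}
#endif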

static inline struct page *extent_buffer_page(struct extent_buffer *eb,
					      unsigned long i)
{
	struct page *p;
	struct address_space *mapping;

	if (i == 0)
		return eb->first_page;
	i += eb->start >> PAGE_CACHE_SHIFT;
	mapping = eb->first_page->mapping;
	if (!mapping)
		return NULL;

	/*
	 * extent_buffer_page is only called after pinning the page
	 * by increasing the reference count.  So we know the page must
	 * be in the radix tree.
	 */
	rcu_read_lock();
	p = radix_tree_lookup(&mapping->page_tree, i);
	rcu_read_unlock();

	return p;
}

static inline unsigned long num_extent_pages(u64 start, u64 len)
{
	return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
		(start >> PAGE_CACHE_SHIFT);
}
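
/*
 * Worked example (illustrative, assuming 4K pages): a buffer at
 * start = 6144 with len = 8192 covers bytes 6144..14335, which touch
 * pages 1, 2 and 3 of the mapping:
 *
 *	((6144 + 8192 + 4095) >> 12) - (6144 >> 12) = 4 - 1 = 3
 */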

static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
						   u64 start,
						   unsigned long len,
						   gfp_t mask)
{
	struct extent_buffer *eb = NULL;
#if LEAK_DEBUG
	unsigned long flags;
#endif

	eb = kmem_cache_zalloc(extent_buffer_cache, mask);
	if (eb == NULL)
		return NULL;
	eb->start = start;
	eb->len = len;
	rwlock_init(&eb->lock);
	atomic_set(&eb->write_locks, 0);
	atomic_set(&eb->read_locks, 0);
	atomic_set(&eb->blocking_readers, 0);
	atomic_set(&eb->blocking_writers, 0);
	atomic_set(&eb->spinning_readers, 0);
	atomic_set(&eb->spinning_writers, 0);
	init_waitqueue_head(&eb->write_lock_wq);
	init_waitqueue_head(&eb->read_lock_wq);

#if LEAK_DEBUG
	spin_lock_irqsave(&leak_lock, flags);
	list_add(&eb->leak_list, &buffers);
	spin_unlock_irqrestore(&leak_lock, flags);
#endif
	atomic_set(&eb->refs, 1);

	return eb;
}

static void __free_extent_buffer(struct extent_buffer *eb)
{
#if LEAK_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&leak_lock, flags);
	list_del(&eb->leak_list);
	spin_unlock_irqrestore(&leak_lock, flags);
#endif
	kmem_cache_free(extent_buffer_cache, eb);
}

/*
 * Helper for releasing the pages that back an extent buffer, from the
 * last page down to (and including) start_idx.
 */
static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
						unsigned long start_idx)
{
	unsigned long index;
	struct page *page;

	if (!eb->first_page)
		return;

	index = num_extent_pages(eb->start, eb->len);
	if (start_idx >= index)
		return;

	do {
		index--;
		page = extent_buffer_page(eb, index);
		if (page)
			page_cache_release(page);
	} while (index != start_idx);
}

/*
 * Helper for releasing the extent buffer.
 */
static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
{
	btrfs_release_extent_buffer_page(eb, 0);
	__free_extent_buffer(eb);
}

struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
					  u64 start, unsigned long len,
					  struct page *page0)
{
	unsigned long num_pages = num_extent_pages(start, len);
	unsigned long i;
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	struct extent_buffer *eb;
	struct extent_buffer *exists = NULL;
	struct page *p;
	struct address_space *mapping = tree->mapping;
	int uptodate = 1;
	int ret;

	rcu_read_lock();
	eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
	if (eb && atomic_inc_not_zero(&eb->refs)) {
		rcu_read_unlock();
		mark_page_accessed(eb->first_page);
		return eb;
	}
	rcu_read_unlock();

	eb = __alloc_extent_buffer(tree, start, len, GFP_NOFS);
	if (!eb)
		return NULL;

	if (page0) {
		eb->first_page = page0;
		i = 1;
		index++;
		page_cache_get(page0);
		mark_page_accessed(page0);
		set_page_extent_mapped(page0);
		set_page_extent_head(page0, len);
		uptodate = PageUptodate(page0);
	} else {
		i = 0;
	}
	for (; i < num_pages; i++, index++) {
		p = find_or_create_page(mapping, index, GFP_NOFS);
		if (!p) {
			WARN_ON(1);
			goto free_eb;
		}
		set_page_extent_mapped(p);
		mark_page_accessed(p);
		if (i == 0) {
			eb->first_page = p;
			set_page_extent_head(p, len);
		} else {
			set_page_private(p, EXTENT_PAGE_PRIVATE);
		}
		if (!PageUptodate(p))
			uptodate = 0;

		/*
		 * see below about how we avoid a nasty race with release page
		 * and why we unlock later
		 */
		if (i != 0)
			unlock_page(p);
	}
	if (uptodate)
		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);

	ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
	if (ret)
		goto free_eb;

	spin_lock(&tree->buffer_lock);
	ret = radix_tree_insert(&tree->buffer, start >> PAGE_CACHE_SHIFT, eb);
	if (ret == -EEXIST) {
		exists = radix_tree_lookup(&tree->buffer,
						start >> PAGE_CACHE_SHIFT);
		/* add one reference for the caller */
		atomic_inc(&exists->refs);
		spin_unlock(&tree->buffer_lock);
		radix_tree_preload_end();
		goto free_eb;
	}
	/* add one reference for the tree */
	atomic_inc(&eb->refs);
	spin_unlock(&tree->buffer_lock);
	radix_tree_preload_end();

	/*
	 * there is a race where release page may have tried to find this
	 * extent buffer in the radix tree but failed.  It will tell the VM
	 * it is safe to reclaim the page, and it will clear the page
	 * private bit.  We must make sure to set the page private bit
	 * properly after the extent buffer is in the radix tree so it
	 * doesn't get lost
	 */
	set_page_extent_mapped(eb->first_page);
	set_page_extent_head(eb->first_page, eb->len);
	if (!page0)
		unlock_page(eb->first_page);
	return eb;

free_eb:
	if (eb->first_page && !page0)
		unlock_page(eb->first_page);

	if (!atomic_dec_and_test(&eb->refs))
		return exists;
	btrfs_release_extent_buffer(eb);
	return exists;
}
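
/*
 * Usage sketch (illustrative, name assumed): callers typically try the
 * cheap RCU lookup first and only fall back to the allocating path,
 * which resolves the radix-tree insertion race by handing back the
 * buffer that won.
 */
#if 0	/* example only */
static struct extent_buffer *
example_find_or_create(struct extent_io_tree *tree, u64 bytenr,
		       u32 blocksize)
{
	struct extent_buffer *eb;

	eb = find_extent_buffer(tree, bytenr, blocksize);
	if (eb)
		return eb;
	return alloc_extent_buffer(tree, bytenr, blocksize, NULL);
}
#endif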

struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
					 u64 start, unsigned long len)
{
	struct extent_buffer *eb;

	rcu_read_lock();
	eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
	if (eb && atomic_inc_not_zero(&eb->refs)) {
		rcu_read_unlock();
		mark_page_accessed(eb->first_page);
		return eb;
	}
	rcu_read_unlock();

	return NULL;
}

void free_extent_buffer(struct extent_buffer *eb)
{
	if (!eb)
		return;

	if (!atomic_dec_and_test(&eb->refs))
		return;

	/*
	 * dropping the last reference here is unexpected; buffers are
	 * freed via try_release_extent_buffer()
	 */
	WARN_ON(1);
}

int clear_extent_buffer_dirty(struct extent_io_tree *tree,
			      struct extent_buffer *eb)
{
	unsigned long i;
	unsigned long num_pages;
	struct page *page;

	num_pages = num_extent_pages(eb->start, eb->len);

	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if (!PageDirty(page))
			continue;

		lock_page(page);
		WARN_ON(!PagePrivate(page));

		set_page_extent_mapped(page);
		if (i == 0)
			set_page_extent_head(page, eb->len);

		clear_page_dirty_for_io(page);
		spin_lock_irq(&page->mapping->tree_lock);
		if (!PageDirty(page)) {
			radix_tree_tag_clear(&page->mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_DIRTY);
		}
		spin_unlock_irq(&page->mapping->tree_lock);
		unlock_page(page);
	}
	return 0;
}

int set_extent_buffer_dirty(struct extent_io_tree *tree,
			    struct extent_buffer *eb)
{
	unsigned long i;
	unsigned long num_pages;
	int was_dirty = 0;

	was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = 0; i < num_pages; i++)
		__set_page_dirty_nobuffers(extent_buffer_page(eb, i));
	return was_dirty;
}

static int __eb_straddles_pages(u64 start, u64 len)
{
	if (len < PAGE_CACHE_SIZE)
		return 1;
	if (start & (PAGE_CACHE_SIZE - 1))
		return 1;
	if ((start + len) & (PAGE_CACHE_SIZE - 1))
		return 1;
	return 0;
}

static int eb_straddles_pages(struct extent_buffer *eb)
{
	return __eb_straddles_pages(eb->start, eb->len);
}
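
/*
 * Worked example (illustrative, assuming 4K pages): a 16K buffer at
 * start 8192 is page aligned at both ends, so it does not "straddle"
 * and the callers below can skip the per-range EXTENT_UPTODATE
 * bookkeeping; a 2K buffer, or a 16K buffer at start 6144, shares its
 * first or last page with other data and does need it.
 */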

int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
				struct extent_buffer *eb,
				struct extent_state **cached_state)
{
	unsigned long i;
	struct page *page;
	unsigned long num_pages;

	num_pages = num_extent_pages(eb->start, eb->len);
	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);

	if (eb_straddles_pages(eb)) {
		clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
				      cached_state, GFP_NOFS);
	}
	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if (page)
			ClearPageUptodate(page);
	}
	return 0;
}

int set_extent_buffer_uptodate(struct extent_io_tree *tree,
			       struct extent_buffer *eb)
{
	unsigned long i;
	struct page *page;
	unsigned long num_pages;

	num_pages = num_extent_pages(eb->start, eb->len);

	if (eb_straddles_pages(eb)) {
		set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
				    NULL, GFP_NOFS);
	}
	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
		    ((i == num_pages - 1) &&
		     ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
			check_page_uptodate(tree, page);
			continue;
		}
		SetPageUptodate(page);
	}
	return 0;
}

int extent_range_uptodate(struct extent_io_tree *tree,
			  u64 start, u64 end)
{
	struct page *page;
	int ret;
	int pg_uptodate = 1;
	int uptodate;
	unsigned long index;

	if (__eb_straddles_pages(start, end - start + 1)) {
		ret = test_range_bit(tree, start, end,
				     EXTENT_UPTODATE, 1, NULL);
		if (ret)
			return 1;
	}
	while (start <= end) {
		index = start >> PAGE_CACHE_SHIFT;
		page = find_get_page(tree->mapping, index);
		uptodate = PageUptodate(page);
		page_cache_release(page);
		if (!uptodate) {
			pg_uptodate = 0;
			break;
		}
		start += PAGE_CACHE_SIZE;
	}
	return pg_uptodate;
}

int extent_buffer_uptodate(struct extent_io_tree *tree,
			   struct extent_buffer *eb,
			   struct extent_state *cached_state)
{
	int ret = 0;
	unsigned long num_pages;
	unsigned long i;
	struct page *page;
	int pg_uptodate = 1;

	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
		return 1;

	if (eb_straddles_pages(eb)) {
		ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
				     EXTENT_UPTODATE, 1, cached_state);
		if (ret)
			return ret;
	}

	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if (!PageUptodate(page)) {
			pg_uptodate = 0;
			break;
		}
	}
	return pg_uptodate;
}

int read_extent_buffer_pages(struct extent_io_tree *tree,
			     struct extent_buffer *eb,
			     u64 start, int wait,
			     get_extent_t *get_extent, int mirror_num)
{
	unsigned long i;
	unsigned long start_i;
	struct page *page;
	int err;
	int ret = 0;
	int locked_pages = 0;
	int all_uptodate = 1;
	int inc_all_pages = 0;
	unsigned long num_pages;
	struct bio *bio = NULL;
	unsigned long bio_flags = 0;

	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
		return 0;

	if (eb_straddles_pages(eb)) {
		if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
				   EXTENT_UPTODATE, 1, NULL)) {
			return 0;
		}
	}

	if (start) {
		WARN_ON(start < eb->start);
		start_i = (start >> PAGE_CACHE_SHIFT) -
			(eb->start >> PAGE_CACHE_SHIFT);
	} else {
		start_i = 0;
	}

	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = start_i; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if (!wait) {
			if (!trylock_page(page))
				goto unlock_exit;
		} else {
			lock_page(page);
		}
		locked_pages++;
		if (!PageUptodate(page))
			all_uptodate = 0;
	}
	if (all_uptodate) {
		if (start_i == 0)
			set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
		goto unlock_exit;
	}

	for (i = start_i; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);

		WARN_ON(!PagePrivate(page));

		set_page_extent_mapped(page);
		if (i == 0)
			set_page_extent_head(page, eb->len);

		if (inc_all_pages)
			page_cache_get(page);
		if (!PageUptodate(page)) {
			if (start_i == 0)
				inc_all_pages = 1;
			ClearPageError(page);
			err = __extent_read_full_page(tree, page,
						      get_extent, &bio,
						      mirror_num, &bio_flags);
			if (err)
				ret = err;
		} else {
			unlock_page(page);
		}
	}

	if (bio)
		submit_one_bio(READ, bio, mirror_num, bio_flags);

	if (ret || !wait)
		return ret;

	for (i = start_i; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		wait_on_page_locked(page);
		if (!PageUptodate(page))
			ret = -EIO;
	}

	if (!ret)
		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
	return ret;

unlock_exit:
	i = start_i;
	while (locked_pages > 0) {
		page = extent_buffer_page(eb, i);
		i++;
		unlock_page(page);
		locked_pages--;
	}
	return ret;
}
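
/*
 * Usage sketch (illustrative, names assumed): a synchronous metadata
 * read passes wait = 1 so the helper locks every page, issues one bio
 * for the stale ones and waits for the result, leaving the caller free
 * to retry from another mirror on -EIO.
 */
#if 0	/* example only */
static int example_read_tree_block(struct extent_io_tree *io_tree,
				   struct extent_buffer *eb,
				   u64 start, int mirror_num)
{
	return read_extent_buffer_pages(io_tree, eb, start, 1 /* wait */,
					btree_get_extent, mirror_num);
}
#endif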

void read_extent_buffer(struct extent_buffer *eb, void *dstv,
			unsigned long start,
			unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *dst = (char *)dstv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);

		cur = min(len, (PAGE_CACHE_SIZE - offset));
		kaddr = page_address(page);
		memcpy(dst, kaddr + offset, cur);

		dst += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}
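
/*
 * Usage sketch (illustrative): this is roughly how the generated btrfs
 * accessor macros read a field out of a tree block: a fixed-size copy
 * at offsetof() into the on-disk struct, byte-swapped from little-endian
 * disk order.  The helper name is an assumption, not a real accessor.
 */
#if 0	/* example only */
static inline u64 example_header_generation(struct extent_buffer *eb)
{
	__le64 gen;

	read_extent_buffer(eb, &gen,
			   offsetof(struct btrfs_header, generation),
			   sizeof(gen));
	return le64_to_cpu(gen);
}
#endif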

int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
			      unsigned long min_len, char **map,
			      unsigned long *map_start,
			      unsigned long *map_len)
{
	size_t offset = start & (PAGE_CACHE_SIZE - 1);
	char *kaddr;
	struct page *p;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
	unsigned long end_i = (start_offset + start + min_len - 1) >>
		PAGE_CACHE_SHIFT;

	if (i != end_i)
		return -EINVAL;

	if (i == 0) {
		offset = start_offset;
		*map_start = 0;
	} else {
		offset = 0;
		*map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
	}

	if (start + min_len > eb->len) {
		printk(KERN_ERR "btrfs bad mapping eb start %llu len %lu, "
		       "wanted %lu %lu\n", (unsigned long long)eb->start,
		       eb->len, start, min_len);
		WARN_ON(1);
		return -EINVAL;
	}

	p = extent_buffer_page(eb, i);
	kaddr = page_address(p);
	*map = kaddr + offset;
	*map_len = PAGE_CACHE_SIZE - offset;
	return 0;
}
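
/*
 * Usage sketch (illustrative, name assumed): the map only succeeds when
 * the requested range sits inside a single page, so callers pair it with
 * the copying helpers as the fallback for ranges that cross a page
 * boundary.  Note that *map points at eb offset *map_start, not at
 * 'start' itself.
 */
#if 0	/* example only */
static void example_read_range(struct extent_buffer *eb, void *dst,
			       unsigned long start, unsigned long len)
{
	char *kaddr;
	unsigned long map_start;
	unsigned long map_len;

	if (!map_private_extent_buffer(eb, start, len, &kaddr,
				       &map_start, &map_len))
		memcpy(dst, kaddr + (start - map_start), len);
	else
		read_extent_buffer(eb, dst, start, len);
}
#endif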

int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
			 unsigned long start,
			 unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *ptr = (char *)ptrv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
	int ret = 0;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);

		cur = min(len, (PAGE_CACHE_SIZE - offset));

		kaddr = page_address(page);
		ret = memcmp(ptr, kaddr + offset, cur);
		if (ret)
			break;

		ptr += cur;
		len -= cur;
		offset = 0;
		i++;
	}
	return ret;
}

void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
			 unsigned long start, unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *src = (char *)srcv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, PAGE_CACHE_SIZE - offset);
		kaddr = page_address(page);
		memcpy(kaddr + offset, src, cur);

		src += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}

void memset_extent_buffer(struct extent_buffer *eb, char c,
			  unsigned long start, unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, PAGE_CACHE_SIZE - offset);
		kaddr = page_address(page);
		memset(kaddr + offset, c, cur);

		len -= cur;
		offset = 0;
		i++;
	}
}

void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
			unsigned long dst_offset, unsigned long src_offset,
			unsigned long len)
{
	u64 dst_len = dst->len;
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;

	WARN_ON(src->len != dst_len);

	offset = (start_offset + dst_offset) &
		((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(dst, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));

		kaddr = page_address(page);
		read_extent_buffer(src, kaddr + offset, src_offset, cur);

		src_offset += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}

static void move_pages(struct page *dst_page, struct page *src_page,
		       unsigned long dst_off, unsigned long src_off,
		       unsigned long len)
{
	char *dst_kaddr = page_address(dst_page);
	if (dst_page == src_page) {
		memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
	} else {
		char *src_kaddr = page_address(src_page);
		char *p = dst_kaddr + dst_off + len;
		char *s = src_kaddr + src_off + len;

		while (len--)
			*--p = *--s;
	}
}

static inline bool areas_overlap(unsigned long src, unsigned long dst,
				 unsigned long len)
{
	unsigned long distance = (src > dst) ? src - dst : dst - src;
	return distance < len;
}
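
/*
 * Worked example (illustrative): moving 4096 bytes from offset 1024 to
 * offset 3072 gives a distance of 2048 < 4096, so the ranges overlap
 * and memmove_extent_buffer() below must copy backwards; at a distance
 * of 4096 or more the plain memcpy path is safe.
 */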

static void copy_pages(struct page *dst_page, struct page *src_page,
		       unsigned long dst_off, unsigned long src_off,
		       unsigned long len)
{
	char *dst_kaddr = page_address(dst_page);
	char *src_kaddr;

	if (dst_page != src_page) {
		src_kaddr = page_address(src_page);
	} else {
		src_kaddr = dst_kaddr;
		BUG_ON(areas_overlap(src_off, dst_off, len));
	}

	memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
}

void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			  unsigned long src_offset, unsigned long len)
{
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long dst_i;
	unsigned long src_i;

	if (src_offset + len > dst->len) {
		printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
		       "len %lu dst len %lu\n", src_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset + len > dst->len) {
		printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
		       "len %lu dst len %lu\n", dst_offset, len, dst->len);
		BUG_ON(1);
	}

	while (len > 0) {
		dst_off_in_page = (start_offset + dst_offset) &
			((unsigned long)PAGE_CACHE_SIZE - 1);
		src_off_in_page = (start_offset + src_offset) &
			((unsigned long)PAGE_CACHE_SIZE - 1);

		dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
		src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;

		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
					       src_off_in_page));
		cur = min_t(unsigned long, cur,
			    (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));

		copy_pages(extent_buffer_page(dst, dst_i),
			   extent_buffer_page(dst, src_i),
			   dst_off_in_page, src_off_in_page, cur);

		src_offset += cur;
		dst_offset += cur;
		len -= cur;
	}
}

void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len)
{
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	unsigned long dst_end = dst_offset + len - 1;
	unsigned long src_end = src_offset + len - 1;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long dst_i;
	unsigned long src_i;

	if (src_offset + len > dst->len) {
		printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
		       "len %lu len %lu\n", src_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset + len > dst->len) {
		printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
		       "len %lu len %lu\n", dst_offset, len, dst->len);
		BUG_ON(1);
	}
	if (!areas_overlap(src_offset, dst_offset, len)) {
		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
		return;
	}
	while (len > 0) {
		dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
		src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;

		dst_off_in_page = (start_offset + dst_end) &
			((unsigned long)PAGE_CACHE_SIZE - 1);
		src_off_in_page = (start_offset + src_end) &
			((unsigned long)PAGE_CACHE_SIZE - 1);

		cur = min_t(unsigned long, len, src_off_in_page + 1);
		cur = min(cur, dst_off_in_page + 1);
		move_pages(extent_buffer_page(dst, dst_i),
			   extent_buffer_page(dst, src_i),
			   dst_off_in_page - cur + 1,
			   src_off_in_page - cur + 1, cur);

		dst_end -= cur;
		src_end -= cur;
		len -= cur;
	}
}

static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
{
	struct extent_buffer *eb =
			container_of(head, struct extent_buffer, rcu_head);

	btrfs_release_extent_buffer(eb);
}

int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
{
	u64 start = page_offset(page);
	struct extent_buffer *eb;
	int ret = 1;

	spin_lock(&tree->buffer_lock);
	eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
	if (!eb) {
		spin_unlock(&tree->buffer_lock);
		return ret;
	}

	if (test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
		ret = 0;
		goto out;
	}

	/*
	 * drop @eb->refs from 1 to 0 if the tree holds the only
	 * reference; otherwise someone else still uses the buffer
	 * and we must back off
	 */
	if (atomic_cmpxchg(&eb->refs, 1, 0) != 1) {
		ret = 0;
		goto out;
	}

	radix_tree_delete(&tree->buffer, start >> PAGE_CACHE_SHIFT);
out:
	spin_unlock(&tree->buffer_lock);

	/* at this point we can safely release the extent buffer */
	if (atomic_read(&eb->refs) == 0)
		call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
	return ret;
}