#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/version.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "extent_io.h"
#include "extent_map.h"
#include "compat.h"
#include "ctree.h"
#include "btrfs_inode.h"

/* temporary define until extent_map moves out of btrfs */
struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
				      unsigned long extra_flags,
				      void (*ctor)(void *, struct kmem_cache *,
						   unsigned long));

static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;

static LIST_HEAD(buffers);
static LIST_HEAD(states);

#ifdef LEAK_DEBUG
static spinlock_t leak_lock = SPIN_LOCK_UNLOCKED;
#endif

#define BUFFER_LRU_MAX 64

struct tree_entry {
	u64 start;
	u64 end;
	struct rb_node rb_node;
};

struct extent_page_data {
	struct bio *bio;
	struct extent_io_tree *tree;
	get_extent_t *get_extent;
};

int __init extent_io_init(void)
{
	extent_state_cache = btrfs_cache_create("extent_state",
					    sizeof(struct extent_state), 0,
					    NULL);
	if (!extent_state_cache)
		return -ENOMEM;

	extent_buffer_cache = btrfs_cache_create("extent_buffers",
					    sizeof(struct extent_buffer), 0,
					    NULL);
	if (!extent_buffer_cache)
		goto free_state_cache;
	return 0;

free_state_cache:
	kmem_cache_destroy(extent_state_cache);
	return -ENOMEM;
}

void extent_io_exit(void)
{
	struct extent_state *state;
	struct extent_buffer *eb;

	while (!list_empty(&states)) {
		state = list_entry(states.next, struct extent_state, leak_list);
		printk("state leak: start %Lu end %Lu state %lu in tree %p refs %d\n", state->start, state->end, state->state, state->tree, atomic_read(&state->refs));
		list_del(&state->leak_list);
		kmem_cache_free(extent_state_cache, state);
	}

	while (!list_empty(&buffers)) {
		eb = list_entry(buffers.next, struct extent_buffer, leak_list);
		printk("buffer leak start %Lu len %lu refs %d\n", eb->start, eb->len, atomic_read(&eb->refs));
		list_del(&eb->leak_list);
		kmem_cache_free(extent_buffer_cache, eb);
	}
	if (extent_state_cache)
		kmem_cache_destroy(extent_state_cache);
	if (extent_buffer_cache)
		kmem_cache_destroy(extent_buffer_cache);
}

void extent_io_tree_init(struct extent_io_tree *tree,
			 struct address_space *mapping, gfp_t mask)
{
	tree->state.rb_node = NULL;
	tree->buffer.rb_node = NULL;
	tree->ops = NULL;
	tree->dirty_bytes = 0;
	spin_lock_init(&tree->lock);
	spin_lock_init(&tree->buffer_lock);
	tree->mapping = mapping;
}
EXPORT_SYMBOL(extent_io_tree_init);

struct extent_state *alloc_extent_state(gfp_t mask)
{
	struct extent_state *state;
#ifdef LEAK_DEBUG
	unsigned long flags;
#endif

	state = kmem_cache_alloc(extent_state_cache, mask);
	if (!state)
		return state;
	state->state = 0;
	state->private = 0;
	state->tree = NULL;
#ifdef LEAK_DEBUG
	spin_lock_irqsave(&leak_lock, flags);
	list_add(&state->leak_list, &states);
	spin_unlock_irqrestore(&leak_lock, flags);
#endif
	atomic_set(&state->refs, 1);
	init_waitqueue_head(&state->wq);
	return state;
}
EXPORT_SYMBOL(alloc_extent_state);

void free_extent_state(struct extent_state *state)
{
	if (!state)
		return;
	if (atomic_dec_and_test(&state->refs)) {
#ifdef LEAK_DEBUG
		unsigned long flags;
#endif
		WARN_ON(state->tree);
#ifdef LEAK_DEBUG
		spin_lock_irqsave(&leak_lock, flags);
		list_del(&state->leak_list);
		spin_unlock_irqrestore(&leak_lock, flags);
#endif
		kmem_cache_free(extent_state_cache, state);
	}
}
EXPORT_SYMBOL(free_extent_state);

static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset > entry->end)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	entry = rb_entry(node, struct tree_entry, rb_node);
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}
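
/*
 * Note (annotation, not upstream code): tree_insert() keys the rb-tree
 * by the 'end' offset of each [start, end] range, and the search
 * routines below treat any offset inside a range as a hit.  Sketch:
 *
 *	after inserting [0, 4095] and [4096, 8191],
 *	tree_search(tree, 4096) lands on the node for [4096, 8191];
 *	tree_search(tree, 9000) finds the first node ending at or after
 *	9000, here NULL since no such range exists.
 */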

static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
				      struct rb_node **prev_ret,
				      struct rb_node **next_ret)
{
	struct rb_root *root = &tree->state;
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct tree_entry *entry;
	struct tree_entry *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct tree_entry, rb_node);
		prev = n;
		prev_entry = entry;

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset > entry->end)
			n = n->rb_right;
		else
			return n;
	}

	if (prev_ret) {
		orig_prev = prev;
		while (prev && offset > prev_entry->end) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

	if (next_ret) {
		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*next_ret = prev;
	}
	return NULL;
}

static inline struct rb_node *tree_search(struct extent_io_tree *tree,
					  u64 offset)
{
	struct rb_node *prev = NULL;
	struct rb_node *ret;

	ret = __etree_search(tree, offset, &prev, NULL);
	if (!ret)
		return prev;
	return ret;
}

static struct extent_buffer *buffer_tree_insert(struct extent_io_tree *tree,
						u64 offset, struct rb_node *node)
{
	struct rb_root *root = &tree->buffer;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct extent_buffer *eb;

	while (*p) {
		parent = *p;
		eb = rb_entry(parent, struct extent_buffer, rb_node);

		if (offset < eb->start)
			p = &(*p)->rb_left;
		else if (offset > eb->start)
			p = &(*p)->rb_right;
		else
			return eb;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

static struct extent_buffer *buffer_search(struct extent_io_tree *tree,
					   u64 offset)
{
	struct rb_root *root = &tree->buffer;
	struct rb_node *n = root->rb_node;
	struct extent_buffer *eb;

	while (n) {
		eb = rb_entry(n, struct extent_buffer, rb_node);
		if (offset < eb->start)
			n = n->rb_left;
		else if (offset > eb->start)
			n = n->rb_right;
		else
			return eb;
	}
	return NULL;
}

/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree.  Extents with EXTENT_IOBITS set (locked or under
 * writeback) are not merged because the end_io handlers need to be able
 * to do operations on them without sleeping (or doing allocations/splits).
 *
 * This should be called with the tree lock held.
 */
static int merge_state(struct extent_io_tree *tree,
		       struct extent_state *state)
{
	struct extent_state *other;
	struct rb_node *other_node;

	if (state->state & EXTENT_IOBITS)
		return 0;

	other_node = rb_prev(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->end == state->start - 1 &&
		    other->state == state->state) {
			state->start = other->start;
			other->tree = NULL;
			rb_erase(&other->rb_node, &tree->state);
			free_extent_state(other);
		}
	}
	other_node = rb_next(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->start == state->end + 1 &&
		    other->state == state->state) {
			other->start = state->start;
			state->tree = NULL;
			rb_erase(&state->rb_node, &tree->state);
			free_extent_state(state);
		}
	}
	return 0;
}
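
/*
 * Worked example (annotation, not upstream code): with [0, 4095] DIRTY
 * and [8192, 12287] DIRTY already in the tree, inserting [4096, 8191]
 * DIRTY and running merge_state() on it erases both neighbors and
 * leaves a single [0, 12287] DIRTY state behind.
 */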

static void set_state_cb(struct extent_io_tree *tree,
			 struct extent_state *state,
			 unsigned long bits)
{
	if (tree->ops && tree->ops->set_bit_hook) {
		tree->ops->set_bit_hook(tree->mapping->host, state->start,
					state->end, state->state, bits);
	}
}

static void clear_state_cb(struct extent_io_tree *tree,
			   struct extent_state *state,
			   unsigned long bits)
{
	/* test the clear hook, not the set hook, before calling it */
	if (tree->ops && tree->ops->clear_bit_hook) {
		tree->ops->clear_bit_hook(tree->mapping->host, state->start,
					  state->end, state->state, bits);
	}
}

/*
 * insert an extent_state struct into the tree.  'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case the
 * state struct is freed.
 *
 * The tree lock is not taken internally.  This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_io_tree *tree,
			struct extent_state *state, u64 start, u64 end,
			int bits)
{
	struct rb_node *node;

	if (end < start) {
		printk("end < start %Lu %Lu\n", end, start);
		WARN_ON(1);
	}
	if (bits & EXTENT_DIRTY)
		tree->dirty_bytes += end - start + 1;
	set_state_cb(tree, state, bits);
	state->state |= bits;
	state->start = start;
	state->end = end;
	node = tree_insert(&tree->state, end, &state->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, start, end);
		free_extent_state(state);
		return -EEXIST;
	}
	state->tree = tree;
	merge_state(tree, state);
	return 0;
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created second half.  'split' indicates an
 * offset inside 'orig' where it should be split.
 *
 * Before calling, the tree has 'orig' at [orig->start, orig->end].  After
 * calling, there are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig: [split, orig->end]
 *
 * The tree locks are not taken by this function.  They need to be held
 * by the caller.
 */
static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
		       struct extent_state *prealloc, u64 split)
{
	struct rb_node *node;
	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->state = orig->state;
	orig->start = split;

	node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, prealloc->start, prealloc->end);
		free_extent_state(prealloc);
		return -EEXIST;
	}
	prealloc->tree = tree;
	return 0;
}

/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up any one waiting on this state (wake == 1), or
 * forcibly remove the state from the tree (delete == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree
 */
static int clear_state_bit(struct extent_io_tree *tree,
			   struct extent_state *state, int bits, int wake,
			   int delete)
{
	int ret = state->state & bits;

	if ((bits & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		WARN_ON(range > tree->dirty_bytes);
		tree->dirty_bytes -= range;
	}
	clear_state_cb(tree, state, bits);
	state->state &= ~bits;
	if (wake)
		wake_up(&state->wq);
	if (delete || state->state == 0) {
		if (state->tree) {
			clear_state_cb(tree, state, state->state);
			rb_erase(&state->rb_node, &tree->state);
			state->tree = NULL;
			free_extent_state(state);
		} else {
			WARN_ON(1);
		}
	} else {
		merge_state(tree, state);
	}
	return ret;
}

/*
 * clear some bits on a range in the tree.  This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (ie for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
 * bits were already set, or zero if none of the bits were already set.
 */
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		     int bits, int wake, int delete, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	unsigned long flags;
	int err;
	int set = 0;

again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	spin_lock_irqsave(&tree->lock, flags);
	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start > end)
		goto out;
	WARN_ON(state->end < start);

	/*
	 * | ---- desired range ---- |
	 *  | state | or
	 *  | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip
	 * bits on second half.
	 *
	 * If the extent we found extends past our range, we
	 * just split and search again.  It'll get split again
	 * the next time though.
	 *
	 * If the extent we found is inside our range, we clear
	 * the desired bit on it.
	 */

	if (state->start < start) {
		if (!prealloc)
			prealloc = alloc_extent_state(GFP_ATOMIC);
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			start = state->end + 1;
			set |= clear_state_bit(tree, state, bits,
					       wake, delete);
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and clear the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		if (!prealloc)
			prealloc = alloc_extent_state(GFP_ATOMIC);
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		if (wake)
			wake_up(&state->wq);
		set |= clear_state_bit(tree, prealloc, bits,
				       wake, delete);
		prealloc = NULL;
		goto out;
	}

	start = state->end + 1;
	set |= clear_state_bit(tree, state, bits, wake, delete);
	goto search_again;

out:
	spin_unlock_irqrestore(&tree->lock, flags);
	if (prealloc)
		free_extent_state(prealloc);

	return set;

search_again:
	if (start > end)
		goto out;
	spin_unlock_irqrestore(&tree->lock, flags);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
EXPORT_SYMBOL(clear_extent_bit);
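
/*
 * Usage sketch (annotation, not upstream code): a truncate-style caller
 * that wants a range gone no matter which bits are set passes
 * delete == 1 and wakes sleepers:
 *
 *	clear_extent_bit(tree, start, end,
 *			 EXTENT_DIRTY | EXTENT_DELALLOC, 1, 1, GFP_NOFS);
 *
 * The return value is > 0 if any of the requested bits were set.
 */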

static int wait_on_state(struct extent_io_tree *tree,
			 struct extent_state *state)
{
	DEFINE_WAIT(wait);
	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
	spin_unlock_irq(&tree->lock);
	schedule();
	spin_lock_irq(&tree->lock);
	finish_wait(&state->wq, &wait);
	return 0;
}

/*
 * waits for one or more bits to clear on a range in the state tree.
 * The range [start, end] is inclusive.
 * The tree lock is taken by this function
 */
int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
{
	struct extent_state *state;
	struct rb_node *node;

	spin_lock_irq(&tree->lock);
again:
	while (1) {
		/*
		 * this search will find all the extents that end after
		 * our range starts
		 */
		node = tree_search(tree, start);
		if (!node)
			break;

		state = rb_entry(node, struct extent_state, rb_node);

		if (state->start > end)
			goto out;

		if (state->state & bits) {
			start = state->start;
			atomic_inc(&state->refs);
			wait_on_state(tree, state);
			free_extent_state(state);
			goto again;
		}
		start = state->end + 1;

		if (start > end)
			break;

		if (need_resched()) {
			spin_unlock_irq(&tree->lock);
			cond_resched();
			spin_lock_irq(&tree->lock);
		}
	}
out:
	spin_unlock_irq(&tree->lock);
	return 0;
}
EXPORT_SYMBOL(wait_extent_bit);

static void set_state_bits(struct extent_io_tree *tree,
			   struct extent_state *state,
			   int bits)
{
	if ((bits & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		tree->dirty_bytes += range;
	}
	set_state_cb(tree, state, bits);
	state->state |= bits;
}

/*
 * set some bits on a range in the tree.  This may require allocations
 * or sleeping, so the gfp mask is used to indicate what is allowed.
 *
 * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
 * range already has the desired bits set.  The start of the existing
 * range is returned in failed_start in this case.
 *
 * [start, end] is inclusive
 * This takes the tree lock.
 */
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits,
		   int exclusive, u64 *failed_start, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	unsigned long flags;
	int err = 0;
	int set;
	u64 last_start;
	u64 last_end;
again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	spin_lock_irqsave(&tree->lock, flags);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node) {
		err = insert_state(tree, prealloc, start, end, bits);
		prealloc = NULL;
		BUG_ON(err == -EEXIST);
		goto out;
	}

	state = rb_entry(node, struct extent_state, rb_node);
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		set = state->state & bits;
		if (set && exclusive) {
			*failed_start = state->start;
			err = -EEXIST;
			goto out;
		}
		set_state_bits(tree, state, bits);
		start = state->end + 1;
		merge_state(tree, state);
		goto search_again;
	}

	/*
	 * | ---- desired range ---- |
	 *  | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		set = state->state & bits;
		if (exclusive && set) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set_state_bits(tree, state, bits);
			start = state->end + 1;
			merge_state(tree, state);
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;
		err = insert_state(tree, prealloc, start, this_end,
				   bits);
		prealloc = NULL;
		BUG_ON(err == -EEXIST);
		if (err)
			goto out;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		set = state->state & bits;
		if (exclusive && set) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		set_state_bits(tree, prealloc, bits);
		merge_state(tree, prealloc);
		prealloc = NULL;
		goto out;
	}

	goto search_again;

out:
	spin_unlock_irqrestore(&tree->lock, flags);
	if (prealloc)
		free_extent_state(prealloc);

	return err;

search_again:
	if (start > end)
		goto out;
	spin_unlock_irqrestore(&tree->lock, flags);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
EXPORT_SYMBOL(set_extent_bit);
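
/*
 * Note (annotation, not upstream code): exclusive == 1 turns
 * set_extent_bit() into a try-lock; on conflict it returns -EEXIST and
 * reports where the conflicting range starts through failed_start.
 * lock_extent() below is exactly that loop: try, wait on failed_start,
 * retry.
 */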

/* wrappers around set/clear extent bit */
int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_dirty);

int set_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
		       gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_ORDERED, 0, NULL, mask);
}
EXPORT_SYMBOL(set_extent_ordered);

int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		    int bits, gfp_t mask)
{
	return set_extent_bit(tree, start, end, bits, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_bits);

int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		      int bits, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_bits);

int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
			gfp_t mask)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_DIRTY,
			      0, NULL, mask);
}
EXPORT_SYMBOL(set_extent_delalloc);

int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
		       gfp_t mask)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_dirty);

int clear_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
			 gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_ORDERED, 1, 0, mask);
}
EXPORT_SYMBOL(clear_extent_ordered);

int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
		   gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_new);

int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_new);

int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
			gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_uptodate);

int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
			  gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_uptodate);

int set_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
			 gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
			      0, NULL, mask);
}
EXPORT_SYMBOL(set_extent_writeback);

int clear_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
			   gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
}
EXPORT_SYMBOL(clear_extent_writeback);

int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
{
	return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
}
EXPORT_SYMBOL(wait_on_extent_writeback);

int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
{
	int err;
	u64 failed_start;
	while (1) {
		err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
				     &failed_start, mask);
		if (err == -EEXIST && (mask & __GFP_WAIT)) {
			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
			start = failed_start;
		} else {
			break;
		}
		WARN_ON(start > end);
	}
	return err;
}
EXPORT_SYMBOL(lock_extent);

int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
		  gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
}
EXPORT_SYMBOL(unlock_extent);
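
/*
 * Usage sketch (annotation, not upstream code): the read path pairs
 * these around a page-sized byte range, with GFP_NOFS so allocations
 * cannot recurse into the filesystem:
 *
 *	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
 *	u64 end = start + PAGE_CACHE_SIZE - 1;
 *
 *	lock_extent(tree, start, end, GFP_NOFS);
 *	... fill or zero the page ...
 *	unlock_extent(tree, start, end, GFP_NOFS);
 */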

/*
 * helper function to set pages and extents in the tree dirty
 */
int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		__set_page_dirty_nobuffers(page);
		page_cache_release(page);
		index++;
	}
	set_extent_dirty(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(set_range_dirty);

/*
 * helper function to set both pages and extents in the tree writeback
 */
int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		set_page_writeback(page);
		page_cache_release(page);
		index++;
	}
	set_extent_writeback(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(set_range_writeback);

int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, int bits)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 1;

	spin_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->end >= start && (state->state & bits)) {
			*start_ret = state->start;
			*end_ret = state->end;
			ret = 0;
			break;
		}
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	spin_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(find_first_extent_bit);
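
/*
 * Usage sketch (annotation, not upstream code): walking every dirty
 * range in a tree.  find_first_extent_bit() returns 0 on a hit and
 * nonzero once the search runs off the end:
 *
 *	u64 found_start, found_end;
 *	u64 cur = 0;
 *
 *	while (!find_first_extent_bit(tree, cur, &found_start,
 *				      &found_end, EXTENT_DIRTY)) {
 *		... process [found_start, found_end] ...
 *		cur = found_end + 1;
 *	}
 */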

struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
						 u64 start, int bits)
{
	struct rb_node *node;
	struct extent_state *state;

	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->end >= start && (state->state & bits))
			return state;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	return NULL;
}
EXPORT_SYMBOL(find_first_extent_bit_state);

u64 find_lock_delalloc_range(struct extent_io_tree *tree,
			     u64 *start, u64 *end, u64 max_bytes)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = *start;
	u64 found = 0;
	u64 total_bytes = 0;

	spin_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
search_again:
	node = tree_search(tree, cur_start);
	if (!node) {
		if (!found)
			*end = (u64)-1;
		goto out;
	}

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (found && state->start != cur_start)
			goto out;
		if (!(state->state & EXTENT_DELALLOC)) {
			if (!found)
				*end = state->end;
			goto out;
		}
		if (!found) {
			struct extent_state *prev_state;
			struct rb_node *prev_node = node;
			while (1) {
				prev_node = rb_prev(prev_node);
				if (!prev_node)
					break;
				prev_state = rb_entry(prev_node,
						      struct extent_state,
						      rb_node);
				if (!(prev_state->state & EXTENT_DELALLOC))
					break;
				state = prev_state;
				node = prev_node;
			}
		}
		if (state->state & EXTENT_LOCKED) {
			DEFINE_WAIT(wait);
			atomic_inc(&state->refs);
			prepare_to_wait(&state->wq, &wait,
					TASK_UNINTERRUPTIBLE);
			spin_unlock_irq(&tree->lock);
			schedule();
			spin_lock_irq(&tree->lock);
			finish_wait(&state->wq, &wait);
			free_extent_state(state);
			goto search_again;
		}
		set_state_cb(tree, state, EXTENT_LOCKED);
		state->state |= EXTENT_LOCKED;
		if (!found)
			*start = state->start;
		found++;
		*end = state->end;
		cur_start = state->end + 1;
		node = rb_next(node);
		if (!node)
			break;
		total_bytes += state->end - state->start + 1;
		if (total_bytes >= max_bytes)
			break;
	}
out:
	spin_unlock_irq(&tree->lock);
	return found;
}

u64 count_range_bits(struct extent_io_tree *tree,
		     u64 *start, u64 search_end, u64 max_bytes,
		     unsigned long bits)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = *start;
	u64 total_bytes = 0;
	int found = 0;

	if (search_end <= cur_start) {
		printk("search_end %Lu start %Lu\n", search_end, cur_start);
		WARN_ON(1);
		return 0;
	}

	spin_lock_irq(&tree->lock);
	if (cur_start == 0 && bits == EXTENT_DIRTY) {
		total_bytes = tree->dirty_bytes;
		goto out;
	}
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, cur_start);
	if (!node)
		goto out;

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->start > search_end)
			break;
		if (state->end >= cur_start && (state->state & bits)) {
			total_bytes += min(search_end, state->end) + 1 -
				       max(cur_start, state->start);
			if (total_bytes >= max_bytes)
				break;
			if (!found) {
				*start = state->start;
				found = 1;
			}
		}
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	spin_unlock_irq(&tree->lock);
	return total_bytes;
}
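
/*
 * Note (annotation, not upstream code): asking for EXTENT_DIRTY from
 * offset 0 short-circuits to tree->dirty_bytes, so
 *
 *	u64 start = 0;
 *	u64 dirty = count_range_bits(tree, &start, (u64)-1,
 *				     (u64)-1, EXTENT_DIRTY);
 *
 * is an O(1) "how much is dirty" query instead of a tree walk.
 */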

/*
 * helper function to lock both pages and extents in the tree.
 * pages must be locked first.
 */
int lock_range(struct extent_io_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;
	int err;

	while (index <= end_index) {
		page = grab_cache_page(tree->mapping, index);
		if (!page) {
			err = -ENOMEM;
			goto failed;
		}
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto failed;
		}
		index++;
	}
	lock_extent(tree, start, end, GFP_NOFS);
	return 0;

failed:
	/*
	 * we failed above in getting the page at 'index', so we undo here
	 * up to but not including the page at 'index'
	 */
	end_index = index;
	index = start >> PAGE_CACHE_SHIFT;
	while (index < end_index) {
		page = find_get_page(tree->mapping, index);
		unlock_page(page);
		page_cache_release(page);
		index++;
	}
	return err;
}
EXPORT_SYMBOL(lock_range);

/*
 * helper function to unlock both pages and extents in the tree.
 */
int unlock_range(struct extent_io_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		unlock_page(page);
		page_cache_release(page);
		index++;
	}
	unlock_extent(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(unlock_range);

int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	spin_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	state->private = private;
out:
	spin_unlock_irq(&tree->lock);
	return ret;
}

int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	spin_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	*private = state->private;
out:
	spin_unlock_irq(&tree->lock);
	return ret;
}

/*
 * searches a range in the state tree for a given mask.
 * If 'filled' == 1, this returns 1 only if the whole range is covered
 * by extents with the bits set.  Otherwise, 1 is returned if any bit
 * in the range is found set.
 */
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   int bits, int filled)
{
	struct extent_state *state = NULL;
	struct rb_node *node;
	int bitset = 0;
	unsigned long flags;

	spin_lock_irqsave(&tree->lock, flags);
	node = tree_search(tree, start);
	while (node && start <= end) {
		state = rb_entry(node, struct extent_state, rb_node);

		if (filled && state->start > start) {
			bitset = 0;
			break;
		}

		if (state->start > end)
			break;

		if (state->state & bits) {
			bitset = 1;
			if (!filled)
				break;
		} else if (filled) {
			bitset = 0;
			break;
		}
		start = state->end + 1;
		if (start > end)
			break;
		node = rb_next(node);
		if (!node) {
			if (filled)
				bitset = 0;
			break;
		}
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return bitset;
}
EXPORT_SYMBOL(test_range_bit);
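
/*
 * Note (annotation, not upstream code): the two modes answer different
 * questions.  filled == 1 asks "is every byte of [start, end] covered
 * by states carrying these bits?" (see check_page_uptodate() below);
 * filled == 0 asks "does any byte carry them?" (see check_page_locked()).
 */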

/*
 * helper function to set a given page up to date if all the
 * extents in the tree for that page are up to date
 */
static int check_page_uptodate(struct extent_io_tree *tree,
			       struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
		SetPageUptodate(page);
	return 0;
}

/*
 * helper function to unlock a page if all the extents in the tree
 * for that page are unlocked
 */
static int check_page_locked(struct extent_io_tree *tree,
			     struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
		unlock_page(page);
	return 0;
}

/*
 * helper function to end page writeback if all the extents
 * in the tree for that page are done with writeback
 */
static int check_page_writeback(struct extent_io_tree *tree,
				struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
		end_page_writeback(page);
	return 0;
}

/* lots and lots of room for performance fixes in the end_bio funcs */

/*
 * after a writepage IO is done, we need to:
 * clear the uptodate bits on error
 * clear the writeback bits in the extent tree for this IO
 * end_page_writeback if the page has no more pending IO
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_extent_writepage(struct bio *bio, int err)
#else
static int end_bio_extent_writepage(struct bio *bio,
				    unsigned int bytes_done, int err)
#endif
{
	int uptodate = err == 0;
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_io_tree *tree;
	u64 start;
	u64 end;
	int whole_page;
	int ret;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	if (bio->bi_size)
		return 1;
#endif
	do {
		struct page *page = bvec->bv_page;
		tree = &BTRFS_I(page->mapping->host)->io_tree;

		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			 bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
			whole_page = 1;
		else
			whole_page = 0;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);
		if (tree->ops && tree->ops->writepage_end_io_hook) {
			ret = tree->ops->writepage_end_io_hook(page, start,
						       end, NULL, uptodate);
			if (ret)
				uptodate = 0;
		}

		if (!uptodate && tree->ops &&
		    tree->ops->writepage_io_failed_hook) {
			ret = tree->ops->writepage_io_failed_hook(bio, page,
							 start, end, NULL);
			if (ret == 0) {
				uptodate = (err == 0);
				continue;
			}
		}

		if (!uptodate) {
			clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
			ClearPageUptodate(page);
			SetPageError(page);
		}

		clear_extent_writeback(tree, start, end, GFP_ATOMIC);

		if (whole_page)
			end_page_writeback(page);
		else
			check_page_writeback(tree, page);
	} while (bvec >= bio->bi_io_vec);
	bio_put(bio);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	return 0;
#endif
}

/*
 * after a readpage IO is done, we need to:
 * clear the uptodate bits on error
 * set the uptodate bits if things worked
 * set the page up to date if all extents in the tree are uptodate
 * clear the lock bit in the extent tree
 * unlock the page if there are no other extents locked for it
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_extent_readpage(struct bio *bio, int err)
#else
static int end_bio_extent_readpage(struct bio *bio,
				   unsigned int bytes_done, int err)
#endif
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_io_tree *tree;
	u64 start;
	u64 end;
	int whole_page;
	int ret;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	if (bio->bi_size)
		return 1;
#endif

	do {
		struct page *page = bvec->bv_page;
		tree = &BTRFS_I(page->mapping->host)->io_tree;

		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
			whole_page = 1;
		else
			whole_page = 0;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
			ret = tree->ops->readpage_end_io_hook(page, start, end,
							      NULL);
			if (ret)
				uptodate = 0;
		}
		if (!uptodate && tree->ops &&
		    tree->ops->readpage_io_failed_hook) {
			ret = tree->ops->readpage_io_failed_hook(bio, page,
							 start, end, NULL);
			if (ret == 0) {
				uptodate =
					test_bit(BIO_UPTODATE, &bio->bi_flags);
				continue;
			}
		}

		if (uptodate)
			set_extent_uptodate(tree, start, end,
					    GFP_ATOMIC);
		unlock_extent(tree, start, end, GFP_ATOMIC);

		if (whole_page) {
			if (uptodate) {
				SetPageUptodate(page);
			} else {
				ClearPageUptodate(page);
				SetPageError(page);
			}
			unlock_page(page);
		} else {
			if (uptodate) {
				check_page_uptodate(tree, page);
			} else {
				ClearPageUptodate(page);
				SetPageError(page);
			}
			check_page_locked(tree, page);
		}
	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	return 0;
#endif
}

/*
 * IO done from prepare_write is pretty simple, we just unlock
 * the structs in the extent tree when done, and set the uptodate bits
 * as appropriate.
 */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_extent_preparewrite(struct bio *bio, int err)
#else
static int end_bio_extent_preparewrite(struct bio *bio,
				       unsigned int bytes_done, int err)
#endif
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_io_tree *tree;
	u64 start;
	u64 end;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	if (bio->bi_size)
		return 1;
#endif

	do {
		struct page *page = bvec->bv_page;
		tree = &BTRFS_I(page->mapping->host)->io_tree;

		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate) {
			set_extent_uptodate(tree, start, end, GFP_ATOMIC);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}

		unlock_extent(tree, start, end, GFP_ATOMIC);

	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	return 0;
#endif
}

static struct bio *
extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
		 gfp_t gfp_flags)
{
	struct bio *bio;

	bio = bio_alloc(gfp_flags, nr_vecs);

	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
		while (!bio && (nr_vecs /= 2))
			bio = bio_alloc(gfp_flags, nr_vecs);
	}

	if (bio) {
		bio->bi_size = 0;
		bio->bi_bdev = bdev;
		bio->bi_sector = first_sector;
	}
	return bio;
}

static int submit_one_bio(int rw, struct bio *bio, int mirror_num)
{
	int ret = 0;
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct page *page = bvec->bv_page;
	struct extent_io_tree *tree = bio->bi_private;
	struct rb_node *node;
	struct extent_state *state;
	u64 start;
	u64 end;

	start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
	end = start + bvec->bv_len - 1;

	spin_lock_irq(&tree->lock);
	node = __etree_search(tree, start, NULL, NULL);
	BUG_ON(!node);
	state = rb_entry(node, struct extent_state, rb_node);
	while (state->end < end) {
		node = rb_next(node);
		state = rb_entry(node, struct extent_state, rb_node);
	}
	BUG_ON(state->end != end);
	spin_unlock_irq(&tree->lock);

	bio->bi_private = NULL;

	bio_get(bio);

	if (tree->ops && tree->ops->submit_bio_hook)
		tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
					   mirror_num);
	else
		submit_bio(rw, bio);
	if (bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;
	bio_put(bio);
	return ret;
}

static int submit_extent_page(int rw, struct extent_io_tree *tree,
			      struct page *page, sector_t sector,
			      size_t size, unsigned long offset,
			      struct block_device *bdev,
			      struct bio **bio_ret,
			      unsigned long max_pages,
			      bio_end_io_t end_io_func,
			      int mirror_num)
{
	int ret = 0;
	struct bio *bio;
	int nr;

	if (bio_ret && *bio_ret) {
		bio = *bio_ret;
		if (bio->bi_sector + (bio->bi_size >> 9) != sector ||
		    (tree->ops && tree->ops->merge_bio_hook &&
		     tree->ops->merge_bio_hook(page, offset, size, bio)) ||
		    bio_add_page(bio, page, size, offset) < size) {
			ret = submit_one_bio(rw, bio, mirror_num);
			bio = NULL;
		} else {
			return 0;
		}
	}
	nr = bio_get_nr_vecs(bdev);
	bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
	if (!bio) {
		printk("failed to allocate bio nr %d\n", nr);
	}

	bio_add_page(bio, page, size, offset);
	bio->bi_end_io = end_io_func;
	bio->bi_private = tree;

	if (bio_ret) {
		*bio_ret = bio;
	} else {
		ret = submit_one_bio(rw, bio, mirror_num);
	}

	return ret;
}
1720
1721void set_page_extent_mapped(struct page *page)
1722{
1723 if (!PagePrivate(page)) {
1724 SetPagePrivate(page);
Chris Masond1310b22008-01-24 16:13:08 -05001725 page_cache_get(page);
Chris Mason6af118c2008-07-22 11:18:07 -04001726 set_page_private(page, EXTENT_PAGE_PRIVATE);
Chris Masond1310b22008-01-24 16:13:08 -05001727 }
1728}
1729
1730void set_page_extent_head(struct page *page, unsigned long len)
1731{
1732 set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
1733}
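
/*
 * the private word on the first page of an extent buffer packs a flag in
 * the low bits and the buffer length above them.  a sketch of the
 * matching decode (illustrative only, assuming the flag fits below bit 2):
 *
 *	WARN_ON(!(page_private(page) & EXTENT_PAGE_PRIVATE_FIRST_PAGE));
 *	len = page_private(page) >> 2;
 */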
1734
/*
 * basic readpage implementation.  Locked extent state structs are inserted
 * into the tree as the IO is scheduled; they are removed again when the IO
 * is done (by the end_io handlers)
 */
1740static int __extent_read_full_page(struct extent_io_tree *tree,
1741 struct page *page,
1742 get_extent_t *get_extent,
Chris Masonf1885912008-04-09 16:28:12 -04001743 struct bio **bio, int mirror_num)
Chris Masond1310b22008-01-24 16:13:08 -05001744{
1745 struct inode *inode = page->mapping->host;
1746 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1747 u64 page_end = start + PAGE_CACHE_SIZE - 1;
1748 u64 end;
1749 u64 cur = start;
1750 u64 extent_offset;
1751 u64 last_byte = i_size_read(inode);
1752 u64 block_start;
1753 u64 cur_end;
1754 sector_t sector;
1755 struct extent_map *em;
1756 struct block_device *bdev;
1757 int ret;
1758 int nr = 0;
1759 size_t page_offset = 0;
1760 size_t iosize;
1761 size_t blocksize = inode->i_sb->s_blocksize;
1762
1763 set_page_extent_mapped(page);
1764
1765 end = page_end;
1766 lock_extent(tree, start, end, GFP_NOFS);
1767
1768 while (cur <= end) {
1769 if (cur >= last_byte) {
1770 char *userpage;
1771 iosize = PAGE_CACHE_SIZE - page_offset;
1772 userpage = kmap_atomic(page, KM_USER0);
1773 memset(userpage + page_offset, 0, iosize);
1774 flush_dcache_page(page);
1775 kunmap_atomic(userpage, KM_USER0);
1776 set_extent_uptodate(tree, cur, cur + iosize - 1,
1777 GFP_NOFS);
1778 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1779 break;
1780 }
1781 em = get_extent(inode, page, page_offset, cur,
1782 end - cur + 1, 0);
1783 if (IS_ERR(em) || !em) {
1784 SetPageError(page);
1785 unlock_extent(tree, cur, end, GFP_NOFS);
1786 break;
1787 }
Chris Masond1310b22008-01-24 16:13:08 -05001788 extent_offset = cur - em->start;
Chris Masone6dcd2d2008-07-17 12:53:50 -04001789 if (extent_map_end(em) <= cur) {
1790printk("bad mapping em [%Lu %Lu] cur %Lu\n", em->start, extent_map_end(em), cur);
1791 }
Chris Masond1310b22008-01-24 16:13:08 -05001792 BUG_ON(extent_map_end(em) <= cur);
Chris Masone6dcd2d2008-07-17 12:53:50 -04001793 if (end < cur) {
1794printk("2bad mapping end %Lu cur %Lu\n", end, cur);
1795 }
Chris Masond1310b22008-01-24 16:13:08 -05001796 BUG_ON(end < cur);
1797
1798 iosize = min(extent_map_end(em) - cur, end - cur + 1);
1799 cur_end = min(extent_map_end(em) - 1, end);
1800 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
1801 sector = (em->block_start + extent_offset) >> 9;
1802 bdev = em->bdev;
1803 block_start = em->block_start;
1804 free_extent_map(em);
1805 em = NULL;
1806
1807 /* we've found a hole, just zero and go on */
1808 if (block_start == EXTENT_MAP_HOLE) {
1809 char *userpage;
1810 userpage = kmap_atomic(page, KM_USER0);
1811 memset(userpage + page_offset, 0, iosize);
1812 flush_dcache_page(page);
1813 kunmap_atomic(userpage, KM_USER0);
1814
1815 set_extent_uptodate(tree, cur, cur + iosize - 1,
1816 GFP_NOFS);
1817 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1818 cur = cur + iosize;
1819 page_offset += iosize;
1820 continue;
1821 }
1822 /* the get_extent function already copied into the page */
1823 if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
Chris Masona1b32a52008-09-05 16:09:51 -04001824 check_page_uptodate(tree, page);
Chris Masond1310b22008-01-24 16:13:08 -05001825 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1826 cur = cur + iosize;
1827 page_offset += iosize;
1828 continue;
1829 }
		/* we have an inline extent but it didn't get marked
		 * uptodate.  Error out
		 */
1833 if (block_start == EXTENT_MAP_INLINE) {
1834 SetPageError(page);
1835 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1836 cur = cur + iosize;
1837 page_offset += iosize;
1838 continue;
1839 }
Chris Masond1310b22008-01-24 16:13:08 -05001840
1841 ret = 0;
1842 if (tree->ops && tree->ops->readpage_io_hook) {
1843 ret = tree->ops->readpage_io_hook(page, cur,
1844 cur + iosize - 1);
1845 }
1846 if (!ret) {
Chris Mason89642222008-07-24 09:41:53 -04001847 unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
1848 pnr -= page->index;
Chris Masond1310b22008-01-24 16:13:08 -05001849 ret = submit_extent_page(READ, tree, page,
1850 sector, iosize, page_offset,
Chris Mason89642222008-07-24 09:41:53 -04001851 bdev, bio, pnr,
Chris Masonf1885912008-04-09 16:28:12 -04001852 end_bio_extent_readpage, mirror_num);
Chris Mason89642222008-07-24 09:41:53 -04001853 nr++;
Chris Masond1310b22008-01-24 16:13:08 -05001854 }
1855 if (ret)
1856 SetPageError(page);
1857 cur = cur + iosize;
1858 page_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05001859 }
1860 if (!nr) {
1861 if (!PageError(page))
1862 SetPageUptodate(page);
1863 unlock_page(page);
1864 }
1865 return 0;
1866}
1867
1868int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
1869 get_extent_t *get_extent)
1870{
1871 struct bio *bio = NULL;
1872 int ret;
1873
Chris Masonf1885912008-04-09 16:28:12 -04001874 ret = __extent_read_full_page(tree, page, get_extent, &bio, 0);
Chris Masond1310b22008-01-24 16:13:08 -05001875 if (bio)
Chris Masonf1885912008-04-09 16:28:12 -04001876 submit_one_bio(READ, bio, 0);
Chris Masond1310b22008-01-24 16:13:08 -05001877 return ret;
1878}
1879EXPORT_SYMBOL(extent_read_full_page);
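
/*
 * readers that work on more than one page keep a struct bio alive across
 * calls so contiguous pages can share a single bio.  a minimal sketch of
 * the pattern (illustrative only; extent_readpages below does the real
 * thing):
 *
 *	struct bio *bio = NULL;
 *
 *	__extent_read_full_page(tree, page_a, get_extent, &bio, 0);
 *	__extent_read_full_page(tree, page_b, get_extent, &bio, 0);
 *	if (bio)
 *		submit_one_bio(READ, bio, 0);
 */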
1880
1881/*
1882 * the writepage semantics are similar to regular writepage. extent
1883 * records are inserted to lock ranges in the tree, and as dirty areas
1884 * are found, they are marked writeback. Then the lock bits are removed
1885 * and the end_io handler clears the writeback ranges
1886 */
1887static int __extent_writepage(struct page *page, struct writeback_control *wbc,
1888 void *data)
1889{
1890 struct inode *inode = page->mapping->host;
1891 struct extent_page_data *epd = data;
1892 struct extent_io_tree *tree = epd->tree;
1893 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1894 u64 delalloc_start;
1895 u64 page_end = start + PAGE_CACHE_SIZE - 1;
1896 u64 end;
1897 u64 cur = start;
1898 u64 extent_offset;
1899 u64 last_byte = i_size_read(inode);
1900 u64 block_start;
1901 u64 iosize;
Chris Masone6dcd2d2008-07-17 12:53:50 -04001902 u64 unlock_start;
Chris Masond1310b22008-01-24 16:13:08 -05001903 sector_t sector;
1904 struct extent_map *em;
1905 struct block_device *bdev;
1906 int ret;
1907 int nr = 0;
Chris Mason7f3c74f2008-07-18 12:01:11 -04001908 size_t pg_offset = 0;
Chris Masond1310b22008-01-24 16:13:08 -05001909 size_t blocksize;
1910 loff_t i_size = i_size_read(inode);
1911 unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
1912 u64 nr_delalloc;
1913 u64 delalloc_end;
1914
1915 WARN_ON(!PageLocked(page));
Chris Mason7f3c74f2008-07-18 12:01:11 -04001916 pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
Chris Mason211c17f2008-05-15 09:13:45 -04001917 if (page->index > end_index ||
Chris Mason7f3c74f2008-07-18 12:01:11 -04001918 (page->index == end_index && !pg_offset)) {
Chris Mason211c17f2008-05-15 09:13:45 -04001919 page->mapping->a_ops->invalidatepage(page, 0);
Chris Masond1310b22008-01-24 16:13:08 -05001920 unlock_page(page);
1921 return 0;
1922 }
1923
1924 if (page->index == end_index) {
1925 char *userpage;
1926
Chris Masond1310b22008-01-24 16:13:08 -05001927 userpage = kmap_atomic(page, KM_USER0);
Chris Mason7f3c74f2008-07-18 12:01:11 -04001928 memset(userpage + pg_offset, 0,
1929 PAGE_CACHE_SIZE - pg_offset);
Chris Masond1310b22008-01-24 16:13:08 -05001930 kunmap_atomic(userpage, KM_USER0);
Chris Mason211c17f2008-05-15 09:13:45 -04001931 flush_dcache_page(page);
Chris Masond1310b22008-01-24 16:13:08 -05001932 }
Chris Mason7f3c74f2008-07-18 12:01:11 -04001933 pg_offset = 0;
Chris Masond1310b22008-01-24 16:13:08 -05001934
1935 set_page_extent_mapped(page);
1936
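	/*
	 * walk the delalloc ranges that touch this page: lock each range,
	 * have the filesystem allocate real extents via fill_delalloc, then
	 * drop the lock and delalloc bits before the main write loop
	 */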
1937 delalloc_start = start;
1938 delalloc_end = 0;
1939 while(delalloc_end < page_end) {
1940 nr_delalloc = find_lock_delalloc_range(tree, &delalloc_start,
1941 &delalloc_end,
1942 128 * 1024 * 1024);
1943 if (nr_delalloc == 0) {
1944 delalloc_start = delalloc_end + 1;
1945 continue;
1946 }
1947 tree->ops->fill_delalloc(inode, delalloc_start,
1948 delalloc_end);
1949 clear_extent_bit(tree, delalloc_start,
1950 delalloc_end,
1951 EXTENT_LOCKED | EXTENT_DELALLOC,
1952 1, 0, GFP_NOFS);
1953 delalloc_start = delalloc_end + 1;
1954 }
1955 lock_extent(tree, start, page_end, GFP_NOFS);
Chris Masone6dcd2d2008-07-17 12:53:50 -04001956 unlock_start = start;
Chris Masond1310b22008-01-24 16:13:08 -05001957
Chris Mason247e7432008-07-17 12:53:51 -04001958 if (tree->ops && tree->ops->writepage_start_hook) {
1959 ret = tree->ops->writepage_start_hook(page, start, page_end);
1960 if (ret == -EAGAIN) {
1961 unlock_extent(tree, start, page_end, GFP_NOFS);
1962 redirty_page_for_writepage(wbc, page);
1963 unlock_page(page);
1964 return 0;
1965 }
1966 }
1967
Chris Masond1310b22008-01-24 16:13:08 -05001968 end = page_end;
1969 if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
1970 printk("found delalloc bits after lock_extent\n");
1971 }
1972
1973 if (last_byte <= start) {
1974 clear_extent_dirty(tree, start, page_end, GFP_NOFS);
Chris Masone6dcd2d2008-07-17 12:53:50 -04001975 unlock_extent(tree, start, page_end, GFP_NOFS);
1976 if (tree->ops && tree->ops->writepage_end_io_hook)
1977 tree->ops->writepage_end_io_hook(page, start,
1978 page_end, NULL, 1);
1979 unlock_start = page_end + 1;
Chris Masond1310b22008-01-24 16:13:08 -05001980 goto done;
1981 }
1982
1983 set_extent_uptodate(tree, start, page_end, GFP_NOFS);
1984 blocksize = inode->i_sb->s_blocksize;
1985
1986 while (cur <= end) {
1987 if (cur >= last_byte) {
1988 clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
Chris Masone6dcd2d2008-07-17 12:53:50 -04001989 unlock_extent(tree, unlock_start, page_end, GFP_NOFS);
1990 if (tree->ops && tree->ops->writepage_end_io_hook)
1991 tree->ops->writepage_end_io_hook(page, cur,
1992 page_end, NULL, 1);
1993 unlock_start = page_end + 1;
Chris Masond1310b22008-01-24 16:13:08 -05001994 break;
1995 }
Chris Mason7f3c74f2008-07-18 12:01:11 -04001996 em = epd->get_extent(inode, page, pg_offset, cur,
Chris Masond1310b22008-01-24 16:13:08 -05001997 end - cur + 1, 1);
1998 if (IS_ERR(em) || !em) {
1999 SetPageError(page);
2000 break;
2001 }
2002
2003 extent_offset = cur - em->start;
2004 BUG_ON(extent_map_end(em) <= cur);
2005 BUG_ON(end < cur);
2006 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2007 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2008 sector = (em->block_start + extent_offset) >> 9;
2009 bdev = em->bdev;
2010 block_start = em->block_start;
2011 free_extent_map(em);
2012 em = NULL;
2013
2014 if (block_start == EXTENT_MAP_HOLE ||
2015 block_start == EXTENT_MAP_INLINE) {
2016 clear_extent_dirty(tree, cur,
2017 cur + iosize - 1, GFP_NOFS);
Chris Masone6dcd2d2008-07-17 12:53:50 -04002018
			unlock_extent(tree, unlock_start, cur + iosize - 1,
				      GFP_NOFS);
Chris Mason7f3c74f2008-07-18 12:01:11 -04002021
Chris Masone6dcd2d2008-07-17 12:53:50 -04002022 if (tree->ops && tree->ops->writepage_end_io_hook)
2023 tree->ops->writepage_end_io_hook(page, cur,
2024 cur + iosize - 1,
2025 NULL, 1);
Chris Masond1310b22008-01-24 16:13:08 -05002026 cur = cur + iosize;
Chris Mason7f3c74f2008-07-18 12:01:11 -04002027 pg_offset += iosize;
Chris Masone6dcd2d2008-07-17 12:53:50 -04002028 unlock_start = cur;
Chris Masond1310b22008-01-24 16:13:08 -05002029 continue;
2030 }
2031
2032 /* leave this out until we have a page_mkwrite call */
2033 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
2034 EXTENT_DIRTY, 0)) {
2035 cur = cur + iosize;
Chris Mason7f3c74f2008-07-18 12:01:11 -04002036 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05002037 continue;
2038 }
2039 clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
2040 if (tree->ops && tree->ops->writepage_io_hook) {
2041 ret = tree->ops->writepage_io_hook(page, cur,
2042 cur + iosize - 1);
2043 } else {
2044 ret = 0;
2045 }
Chris Mason1259ab72008-05-12 13:39:03 -04002046 if (ret) {
Chris Masond1310b22008-01-24 16:13:08 -05002047 SetPageError(page);
Chris Mason1259ab72008-05-12 13:39:03 -04002048 } else {
Chris Masond1310b22008-01-24 16:13:08 -05002049 unsigned long max_nr = end_index + 1;
Chris Mason7f3c74f2008-07-18 12:01:11 -04002050
Chris Masond1310b22008-01-24 16:13:08 -05002051 set_range_writeback(tree, cur, cur + iosize - 1);
2052 if (!PageWriteback(page)) {
2053 printk("warning page %lu not writeback, "
2054 "cur %llu end %llu\n", page->index,
2055 (unsigned long long)cur,
2056 (unsigned long long)end);
2057 }
2058
2059 ret = submit_extent_page(WRITE, tree, page, sector,
Chris Mason7f3c74f2008-07-18 12:01:11 -04002060 iosize, pg_offset, bdev,
Chris Masond1310b22008-01-24 16:13:08 -05002061 &epd->bio, max_nr,
Chris Masonf1885912008-04-09 16:28:12 -04002062 end_bio_extent_writepage, 0);
Chris Masond1310b22008-01-24 16:13:08 -05002063 if (ret)
2064 SetPageError(page);
2065 }
2066 cur = cur + iosize;
Chris Mason7f3c74f2008-07-18 12:01:11 -04002067 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05002068 nr++;
2069 }
2070done:
2071 if (nr == 0) {
2072 /* make sure the mapping tag for page dirty gets cleared */
2073 set_page_writeback(page);
2074 end_page_writeback(page);
2075 }
Chris Masone6dcd2d2008-07-17 12:53:50 -04002076 if (unlock_start <= page_end)
2077 unlock_extent(tree, unlock_start, page_end, GFP_NOFS);
Chris Masond1310b22008-01-24 16:13:08 -05002078 unlock_page(page);
2079 return 0;
2080}
2081
Chris Mason5e478dc2008-04-25 09:10:45 -04002082#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
Chris Mason4bef0842008-09-08 11:18:08 -04002083/* Taken directly from 2.6.23 with a mod for a lockpage hook */
Chris Masond1310b22008-01-24 16:13:08 -05002084typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
2085 void *data);
Chris Mason4bef0842008-09-08 11:18:08 -04002086#endif
Chris Masond1310b22008-01-24 16:13:08 -05002087
2088/**
Chris Mason4bef0842008-09-08 11:18:08 -04002089 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
Chris Masond1310b22008-01-24 16:13:08 -05002090 * @mapping: address space structure to write
2091 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2092 * @writepage: function called for each page
2093 * @data: data passed to writepage function
2094 *
2095 * If a page is already under I/O, write_cache_pages() skips it, even
2096 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
2097 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
2098 * and msync() need to guarantee that all the data which was dirty at the time
2099 * the call was made get new I/O started against them. If wbc->sync_mode is
2100 * WB_SYNC_ALL then we were called for data integrity and we must wait for
2101 * existing IO to complete.
2102 */
Chris Mason4bef0842008-09-08 11:18:08 -04002103int extent_write_cache_pages(struct extent_io_tree *tree,
2104 struct address_space *mapping,
2105 struct writeback_control *wbc,
2106 writepage_t writepage, void *data)
Chris Masond1310b22008-01-24 16:13:08 -05002107{
2108 struct backing_dev_info *bdi = mapping->backing_dev_info;
2109 int ret = 0;
2110 int done = 0;
2111 struct pagevec pvec;
2112 int nr_pages;
2113 pgoff_t index;
2114 pgoff_t end; /* Inclusive */
2115 int scanned = 0;
2116 int range_whole = 0;
2117
2118 if (wbc->nonblocking && bdi_write_congested(bdi)) {
2119 wbc->encountered_congestion = 1;
2120 return 0;
2121 }
2122
2123 pagevec_init(&pvec, 0);
2124 if (wbc->range_cyclic) {
2125 index = mapping->writeback_index; /* Start from prev offset */
2126 end = -1;
2127 } else {
2128 index = wbc->range_start >> PAGE_CACHE_SHIFT;
2129 end = wbc->range_end >> PAGE_CACHE_SHIFT;
2130 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2131 range_whole = 1;
2132 scanned = 1;
2133 }
2134retry:
2135 while (!done && (index <= end) &&
2136 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
2137 PAGECACHE_TAG_DIRTY,
2138 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
2139 unsigned i;
2140
2141 scanned = 1;
2142 for (i = 0; i < nr_pages; i++) {
2143 struct page *page = pvec.pages[i];
2144
2145 /*
2146 * At this point we hold neither mapping->tree_lock nor
2147 * lock on the page itself: the page may be truncated or
2148 * invalidated (changing page->mapping to NULL), or even
2149 * swizzled back from swapper_space to tmpfs file
2150 * mapping
2151 */
Chris Mason4bef0842008-09-08 11:18:08 -04002152 if (tree->ops && tree->ops->write_cache_pages_lock_hook)
2153 tree->ops->write_cache_pages_lock_hook(page);
2154 else
2155 lock_page(page);
Chris Masond1310b22008-01-24 16:13:08 -05002156
2157 if (unlikely(page->mapping != mapping)) {
2158 unlock_page(page);
2159 continue;
2160 }
2161
2162 if (!wbc->range_cyclic && page->index > end) {
2163 done = 1;
2164 unlock_page(page);
2165 continue;
2166 }
2167
2168 if (wbc->sync_mode != WB_SYNC_NONE)
2169 wait_on_page_writeback(page);
2170
2171 if (PageWriteback(page) ||
2172 !clear_page_dirty_for_io(page)) {
2173 unlock_page(page);
2174 continue;
2175 }
2176
2177 ret = (*writepage)(page, wbc, data);
2178
2179 if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
2180 unlock_page(page);
2181 ret = 0;
2182 }
2183 if (ret || (--(wbc->nr_to_write) <= 0))
2184 done = 1;
2185 if (wbc->nonblocking && bdi_write_congested(bdi)) {
2186 wbc->encountered_congestion = 1;
2187 done = 1;
2188 }
2189 }
2190 pagevec_release(&pvec);
2191 cond_resched();
2192 }
2193 if (!scanned && !done) {
2194 /*
2195 * We hit the last page and there is more work to be done: wrap
2196 * back to the start of the file
2197 */
2198 scanned = 1;
2199 index = 0;
2200 goto retry;
2201 }
2202 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2203 mapping->writeback_index = index;
Chris Mason4bef0842008-09-08 11:18:08 -04002204
2205 if (wbc->range_cont)
		wbc->range_start = (loff_t)index << PAGE_CACHE_SHIFT;
Chris Masond1310b22008-01-24 16:13:08 -05002207 return ret;
2208}
Chris Mason4bef0842008-09-08 11:18:08 -04002209EXPORT_SYMBOL(extent_write_cache_pages);
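
/*
 * a minimal sketch of driving extent_write_cache_pages for a data
 * integrity flush (illustrative only; epd set up as in the callers
 * below):
 *
 *	struct writeback_control wbc = {
 *		.sync_mode = WB_SYNC_ALL,
 *		.nr_to_write = LONG_MAX,
 *		.range_start = 0,
 *		.range_end = LLONG_MAX,
 *	};
 *
 *	extent_write_cache_pages(tree, mapping, &wbc,
 *				 __extent_writepage, &epd);
 */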
Chris Masond1310b22008-01-24 16:13:08 -05002210
2211int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
2212 get_extent_t *get_extent,
2213 struct writeback_control *wbc)
2214{
2215 int ret;
2216 struct address_space *mapping = page->mapping;
2217 struct extent_page_data epd = {
2218 .bio = NULL,
2219 .tree = tree,
2220 .get_extent = get_extent,
2221 };
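	/*
	 * after writing the target page itself, opportunistically push out
	 * up to 64 of the dirty pages that follow it in the file.
	 * range_start is set one page past this one so it isn't written
	 * twice.
	 */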
2222 struct writeback_control wbc_writepages = {
2223 .bdi = wbc->bdi,
2224 .sync_mode = WB_SYNC_NONE,
2225 .older_than_this = NULL,
2226 .nr_to_write = 64,
2227 .range_start = page_offset(page) + PAGE_CACHE_SIZE,
2228 .range_end = (loff_t)-1,
2229 };
2230
2231
2232 ret = __extent_writepage(page, wbc, &epd);
2233
Chris Mason4bef0842008-09-08 11:18:08 -04002234 extent_write_cache_pages(tree, mapping, &wbc_writepages,
2235 __extent_writepage, &epd);
Chris Masond1310b22008-01-24 16:13:08 -05002236 if (epd.bio) {
Chris Masonf1885912008-04-09 16:28:12 -04002237 submit_one_bio(WRITE, epd.bio, 0);
Chris Masond1310b22008-01-24 16:13:08 -05002238 }
2239 return ret;
2240}
2241EXPORT_SYMBOL(extent_write_full_page);
2242
2243
2244int extent_writepages(struct extent_io_tree *tree,
2245 struct address_space *mapping,
2246 get_extent_t *get_extent,
2247 struct writeback_control *wbc)
2248{
2249 int ret = 0;
2250 struct extent_page_data epd = {
2251 .bio = NULL,
2252 .tree = tree,
2253 .get_extent = get_extent,
2254 };
2255
Chris Mason4bef0842008-09-08 11:18:08 -04002256 ret = extent_write_cache_pages(tree, mapping, wbc,
2257 __extent_writepage, &epd);
Chris Masond1310b22008-01-24 16:13:08 -05002258 if (epd.bio) {
Chris Masonf1885912008-04-09 16:28:12 -04002259 submit_one_bio(WRITE, epd.bio, 0);
Chris Masond1310b22008-01-24 16:13:08 -05002260 }
2261 return ret;
2262}
2263EXPORT_SYMBOL(extent_writepages);
2264
2265int extent_readpages(struct extent_io_tree *tree,
2266 struct address_space *mapping,
2267 struct list_head *pages, unsigned nr_pages,
2268 get_extent_t get_extent)
2269{
2270 struct bio *bio = NULL;
2271 unsigned page_idx;
2272 struct pagevec pvec;
2273
2274 pagevec_init(&pvec, 0);
2275 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
2276 struct page *page = list_entry(pages->prev, struct page, lru);
2277
2278 prefetchw(&page->flags);
2279 list_del(&page->lru);
2280 /*
2281 * what we want to do here is call add_to_page_cache_lru,
2282 * but that isn't exported, so we reproduce it here
2283 */
2284 if (!add_to_page_cache(page, mapping,
2285 page->index, GFP_KERNEL)) {
2286
2287 /* open coding of lru_cache_add, also not exported */
2288 page_cache_get(page);
2289 if (!pagevec_add(&pvec, page))
2290 __pagevec_lru_add(&pvec);
Chris Masonf1885912008-04-09 16:28:12 -04002291 __extent_read_full_page(tree, page, get_extent,
2292 &bio, 0);
Chris Masond1310b22008-01-24 16:13:08 -05002293 }
2294 page_cache_release(page);
2295 }
2296 if (pagevec_count(&pvec))
2297 __pagevec_lru_add(&pvec);
2298 BUG_ON(!list_empty(pages));
2299 if (bio)
Chris Masonf1885912008-04-09 16:28:12 -04002300 submit_one_bio(READ, bio, 0);
Chris Masond1310b22008-01-24 16:13:08 -05002301 return 0;
2302}
2303EXPORT_SYMBOL(extent_readpages);
2304
/*
 * basic invalidatepage code.  This waits on any locked or writeback
 * ranges corresponding to the page, and then deletes any extent state
 * records from the tree
 */
2310int extent_invalidatepage(struct extent_io_tree *tree,
2311 struct page *page, unsigned long offset)
2312{
2313 u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
2314 u64 end = start + PAGE_CACHE_SIZE - 1;
2315 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
2316
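	/*
	 * round start up to the next block boundary; a block that is only
	 * partially invalidated must keep its extent state
	 */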
	start += (offset + blocksize - 1) & ~(blocksize - 1);
2318 if (start > end)
2319 return 0;
2320
2321 lock_extent(tree, start, end, GFP_NOFS);
2322 wait_on_extent_writeback(tree, start, end);
2323 clear_extent_bit(tree, start, end,
2324 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
2325 1, 1, GFP_NOFS);
2326 return 0;
2327}
2328EXPORT_SYMBOL(extent_invalidatepage);
2329
/*
 * simple commit_write call.  set_page_dirty is used to mark both
 * the page and the extent records as dirty
 */
2334int extent_commit_write(struct extent_io_tree *tree,
2335 struct inode *inode, struct page *page,
2336 unsigned from, unsigned to)
2337{
2338 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2339
2340 set_page_extent_mapped(page);
2341 set_page_dirty(page);
2342
2343 if (pos > inode->i_size) {
2344 i_size_write(inode, pos);
2345 mark_inode_dirty(inode);
2346 }
2347 return 0;
2348}
2349EXPORT_SYMBOL(extent_commit_write);
2350
2351int extent_prepare_write(struct extent_io_tree *tree,
2352 struct inode *inode, struct page *page,
2353 unsigned from, unsigned to, get_extent_t *get_extent)
2354{
2355 u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
2356 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
2357 u64 block_start;
2358 u64 orig_block_start;
2359 u64 block_end;
2360 u64 cur_end;
2361 struct extent_map *em;
2362 unsigned blocksize = 1 << inode->i_blkbits;
2363 size_t page_offset = 0;
2364 size_t block_off_start;
2365 size_t block_off_end;
2366 int err = 0;
2367 int iocount = 0;
2368 int ret = 0;
2369 int isnew;
2370
2371 set_page_extent_mapped(page);
2372
2373 block_start = (page_start + from) & ~((u64)blocksize - 1);
2374 block_end = (page_start + to - 1) | (blocksize - 1);
2375 orig_block_start = block_start;
2376
2377 lock_extent(tree, page_start, page_end, GFP_NOFS);
2378 while(block_start <= block_end) {
2379 em = get_extent(inode, page, page_offset, block_start,
2380 block_end - block_start + 1, 1);
2381 if (IS_ERR(em) || !em) {
2382 goto err;
2383 }
2384 cur_end = min(block_end, extent_map_end(em) - 1);
2385 block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
2386 block_off_end = block_off_start + blocksize;
2387 isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
2388
2389 if (!PageUptodate(page) && isnew &&
2390 (block_off_end > to || block_off_start < from)) {
2391 void *kaddr;
2392
2393 kaddr = kmap_atomic(page, KM_USER0);
2394 if (block_off_end > to)
2395 memset(kaddr + to, 0, block_off_end - to);
2396 if (block_off_start < from)
2397 memset(kaddr + block_off_start, 0,
2398 from - block_off_start);
2399 flush_dcache_page(page);
2400 kunmap_atomic(kaddr, KM_USER0);
2401 }
2402 if ((em->block_start != EXTENT_MAP_HOLE &&
2403 em->block_start != EXTENT_MAP_INLINE) &&
2404 !isnew && !PageUptodate(page) &&
2405 (block_off_end > to || block_off_start < from) &&
2406 !test_range_bit(tree, block_start, cur_end,
2407 EXTENT_UPTODATE, 1)) {
2408 u64 sector;
2409 u64 extent_offset = block_start - em->start;
2410 size_t iosize;
2411 sector = (em->block_start + extent_offset) >> 9;
2412 iosize = (cur_end - block_start + blocksize) &
2413 ~((u64)blocksize - 1);
2414 /*
2415 * we've already got the extent locked, but we
2416 * need to split the state such that our end_bio
2417 * handler can clear the lock.
2418 */
2419 set_extent_bit(tree, block_start,
2420 block_start + iosize - 1,
2421 EXTENT_LOCKED, 0, NULL, GFP_NOFS);
2422 ret = submit_extent_page(READ, tree, page,
2423 sector, iosize, page_offset, em->bdev,
2424 NULL, 1,
Chris Masonf1885912008-04-09 16:28:12 -04002425 end_bio_extent_preparewrite, 0);
Chris Masond1310b22008-01-24 16:13:08 -05002426 iocount++;
2427 block_start = block_start + iosize;
2428 } else {
2429 set_extent_uptodate(tree, block_start, cur_end,
2430 GFP_NOFS);
2431 unlock_extent(tree, block_start, cur_end, GFP_NOFS);
2432 block_start = cur_end + 1;
2433 }
2434 page_offset = block_start & (PAGE_CACHE_SIZE - 1);
2435 free_extent_map(em);
2436 }
2437 if (iocount) {
2438 wait_extent_bit(tree, orig_block_start,
2439 block_end, EXTENT_LOCKED);
2440 }
2441 check_page_uptodate(tree, page);
2442err:
2443 /* FIXME, zero out newly allocated blocks on error */
2444 return err;
2445}
2446EXPORT_SYMBOL(extent_prepare_write);
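
/*
 * a minimal sketch of the prepare/commit pairing as a buffered write
 * path would use it (illustrative only; error handling omitted):
 *
 *	err = extent_prepare_write(tree, inode, page, from, to, get_extent);
 *	if (!err) {
 *		... copy bytes [from, to) into the page ...
 *		extent_commit_write(tree, inode, page, from, to);
 *	}
 */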
2447
/*
 * a helper for releasepage.  This tests for areas of the page that
 * are locked or under IO and drops the related state bits if it is safe
 * to drop the page.
 */
2453int try_release_extent_state(struct extent_map_tree *map,
2454 struct extent_io_tree *tree, struct page *page,
2455 gfp_t mask)
2456{
2457 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2458 u64 end = start + PAGE_CACHE_SIZE - 1;
2459 int ret = 1;
2460
Chris Mason211f90e2008-07-18 11:56:15 -04002461 if (test_range_bit(tree, start, end,
2462 EXTENT_IOBITS | EXTENT_ORDERED, 0))
Chris Mason7b13b7b2008-04-18 10:29:50 -04002463 ret = 0;
2464 else {
2465 if ((mask & GFP_NOFS) == GFP_NOFS)
2466 mask = GFP_NOFS;
2467 clear_extent_bit(tree, start, end, EXTENT_UPTODATE,
2468 1, 1, mask);
2469 }
2470 return ret;
2471}
2472EXPORT_SYMBOL(try_release_extent_state);
2473
2474/*
Chris Masond1310b22008-01-24 16:13:08 -05002475 * a helper for releasepage. As long as there are no locked extents
2476 * in the range corresponding to the page, both state records and extent
2477 * map records are removed
2478 */
2479int try_release_extent_mapping(struct extent_map_tree *map,
Chris Mason70dec802008-01-29 09:59:12 -05002480 struct extent_io_tree *tree, struct page *page,
2481 gfp_t mask)
Chris Masond1310b22008-01-24 16:13:08 -05002482{
2483 struct extent_map *em;
2484 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2485 u64 end = start + PAGE_CACHE_SIZE - 1;
Chris Mason7b13b7b2008-04-18 10:29:50 -04002486
Chris Mason70dec802008-01-29 09:59:12 -05002487 if ((mask & __GFP_WAIT) &&
2488 page->mapping->host->i_size > 16 * 1024 * 1024) {
Yan39b56372008-02-15 10:40:50 -05002489 u64 len;
Chris Mason70dec802008-01-29 09:59:12 -05002490 while (start <= end) {
Yan39b56372008-02-15 10:40:50 -05002491 len = end - start + 1;
Chris Mason70dec802008-01-29 09:59:12 -05002492 spin_lock(&map->lock);
Yan39b56372008-02-15 10:40:50 -05002493 em = lookup_extent_mapping(map, start, len);
Chris Mason70dec802008-01-29 09:59:12 -05002494 if (!em || IS_ERR(em)) {
2495 spin_unlock(&map->lock);
2496 break;
2497 }
Chris Mason7f3c74f2008-07-18 12:01:11 -04002498 if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
2499 em->start != start) {
Chris Mason70dec802008-01-29 09:59:12 -05002500 spin_unlock(&map->lock);
2501 free_extent_map(em);
2502 break;
2503 }
2504 if (!test_range_bit(tree, em->start,
2505 extent_map_end(em) - 1,
2506 EXTENT_LOCKED, 0)) {
2507 remove_extent_mapping(map, em);
2508 /* once for the rb tree */
2509 free_extent_map(em);
2510 }
2511 start = extent_map_end(em);
Chris Masond1310b22008-01-24 16:13:08 -05002512 spin_unlock(&map->lock);
Chris Mason70dec802008-01-29 09:59:12 -05002513
2514 /* once for us */
Chris Masond1310b22008-01-24 16:13:08 -05002515 free_extent_map(em);
2516 }
Chris Masond1310b22008-01-24 16:13:08 -05002517 }
Chris Mason7b13b7b2008-04-18 10:29:50 -04002518 return try_release_extent_state(map, tree, page, mask);
Chris Masond1310b22008-01-24 16:13:08 -05002519}
2520EXPORT_SYMBOL(try_release_extent_mapping);
2521
2522sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
2523 get_extent_t *get_extent)
2524{
2525 struct inode *inode = mapping->host;
	u64 start = (u64)iblock << inode->i_blkbits;
2527 sector_t sector = 0;
2528 struct extent_map *em;
2529
2530 em = get_extent(inode, NULL, 0, start, (1 << inode->i_blkbits), 0);
2531 if (!em || IS_ERR(em))
2532 return 0;
2533
2534 if (em->block_start == EXTENT_MAP_INLINE ||
2535 em->block_start == EXTENT_MAP_HOLE)
2536 goto out;
2537
2538 sector = (em->block_start + start - em->start) >> inode->i_blkbits;
Chris Masond1310b22008-01-24 16:13:08 -05002539out:
2540 free_extent_map(em);
2541 return sector;
2542}
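
/*
 * e.g. with 4k blocks, a file block whose extent places it at disk byte
 * 1048576 maps to block number 1048576 >> 12 == 256; holes and inline
 * extents return 0, following the usual bmap convention
 */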
2543
Chris Masond1310b22008-01-24 16:13:08 -05002544static inline struct page *extent_buffer_page(struct extent_buffer *eb,
2545 unsigned long i)
2546{
2547 struct page *p;
2548 struct address_space *mapping;
2549
2550 if (i == 0)
2551 return eb->first_page;
2552 i += eb->start >> PAGE_CACHE_SHIFT;
2553 mapping = eb->first_page->mapping;
Chris Mason33958dc2008-07-30 10:29:12 -04002554 if (!mapping)
2555 return NULL;
Sven Wegener0ee0fda2008-07-30 16:54:26 -04002556
2557 /*
2558 * extent_buffer_page is only called after pinning the page
2559 * by increasing the reference count. So we know the page must
2560 * be in the radix tree.
2561 */
2562#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
2563 rcu_read_lock();
2564#else
Chris Masond1310b22008-01-24 16:13:08 -05002565 read_lock_irq(&mapping->tree_lock);
Sven Wegener0ee0fda2008-07-30 16:54:26 -04002566#endif
Chris Masond1310b22008-01-24 16:13:08 -05002567 p = radix_tree_lookup(&mapping->page_tree, i);
Sven Wegener0ee0fda2008-07-30 16:54:26 -04002568
2569#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
2570 rcu_read_unlock();
2571#else
Chris Masond1310b22008-01-24 16:13:08 -05002572 read_unlock_irq(&mapping->tree_lock);
Sven Wegener0ee0fda2008-07-30 16:54:26 -04002573#endif
Chris Masond1310b22008-01-24 16:13:08 -05002574 return p;
2575}
2576
Chris Mason6af118c2008-07-22 11:18:07 -04002577static inline unsigned long num_extent_pages(u64 start, u64 len)
Chris Masonce9adaa2008-04-09 16:28:12 -04002578{
Chris Mason6af118c2008-07-22 11:18:07 -04002579 return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
2580 (start >> PAGE_CACHE_SHIFT);
Chris Mason728131d2008-04-09 16:28:12 -04002581}
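
/*
 * e.g. with 4k pages, start 6144 and len 8192 cover bytes 6144..14335,
 * touching pages 1, 2 and 3:
 * ((6144 + 8192 + 4095) >> 12) - (6144 >> 12) == 3
 */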
2582
Chris Masond1310b22008-01-24 16:13:08 -05002583static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
2584 u64 start,
2585 unsigned long len,
2586 gfp_t mask)
2587{
2588 struct extent_buffer *eb = NULL;
Chris Mason4bef0842008-09-08 11:18:08 -04002589#ifdef LEAK_DEBUG
Chris Mason2d2ae542008-03-26 16:24:23 -04002590 unsigned long flags;
Chris Mason4bef0842008-09-08 11:18:08 -04002591#endif
Chris Masond1310b22008-01-24 16:13:08 -05002592
	eb = kmem_cache_zalloc(extent_buffer_cache, mask);
	if (!eb)
		return NULL;
Chris Masond1310b22008-01-24 16:13:08 -05002594 eb->start = start;
2595 eb->len = len;
Chris Masona61e6f22008-07-22 11:18:08 -04002596 mutex_init(&eb->mutex);
Chris Mason4bef0842008-09-08 11:18:08 -04002597#ifdef LEAK_DEBUG
Chris Mason2d2ae542008-03-26 16:24:23 -04002598 spin_lock_irqsave(&leak_lock, flags);
2599 list_add(&eb->leak_list, &buffers);
2600 spin_unlock_irqrestore(&leak_lock, flags);
Chris Mason4bef0842008-09-08 11:18:08 -04002601#endif
Chris Masond1310b22008-01-24 16:13:08 -05002602 atomic_set(&eb->refs, 1);
2603
2604 return eb;
2605}
2606
2607static void __free_extent_buffer(struct extent_buffer *eb)
2608{
Chris Mason4bef0842008-09-08 11:18:08 -04002609#ifdef LEAK_DEBUG
Chris Mason2d2ae542008-03-26 16:24:23 -04002610 unsigned long flags;
2611 spin_lock_irqsave(&leak_lock, flags);
2612 list_del(&eb->leak_list);
2613 spin_unlock_irqrestore(&leak_lock, flags);
Chris Mason4bef0842008-09-08 11:18:08 -04002614#endif
Chris Masond1310b22008-01-24 16:13:08 -05002615 kmem_cache_free(extent_buffer_cache, eb);
2616}
2617
2618struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
2619 u64 start, unsigned long len,
2620 struct page *page0,
2621 gfp_t mask)
2622{
2623 unsigned long num_pages = num_extent_pages(start, len);
2624 unsigned long i;
2625 unsigned long index = start >> PAGE_CACHE_SHIFT;
2626 struct extent_buffer *eb;
Chris Mason6af118c2008-07-22 11:18:07 -04002627 struct extent_buffer *exists = NULL;
Chris Masond1310b22008-01-24 16:13:08 -05002628 struct page *p;
2629 struct address_space *mapping = tree->mapping;
2630 int uptodate = 1;
2631
Chris Mason6af118c2008-07-22 11:18:07 -04002632 spin_lock(&tree->buffer_lock);
2633 eb = buffer_search(tree, start);
2634 if (eb) {
2635 atomic_inc(&eb->refs);
2636 spin_unlock(&tree->buffer_lock);
Josef Bacik0f9dd462008-09-23 13:14:11 -04002637 mark_page_accessed(eb->first_page);
Chris Mason6af118c2008-07-22 11:18:07 -04002638 return eb;
2639 }
2640 spin_unlock(&tree->buffer_lock);
2641
Chris Masond1310b22008-01-24 16:13:08 -05002642 eb = __alloc_extent_buffer(tree, start, len, mask);
Peter2b114d12008-04-01 11:21:40 -04002643 if (!eb)
Chris Masond1310b22008-01-24 16:13:08 -05002644 return NULL;
2645
Chris Masond1310b22008-01-24 16:13:08 -05002646 if (page0) {
2647 eb->first_page = page0;
2648 i = 1;
2649 index++;
2650 page_cache_get(page0);
2651 mark_page_accessed(page0);
2652 set_page_extent_mapped(page0);
Chris Masond1310b22008-01-24 16:13:08 -05002653 set_page_extent_head(page0, len);
Chris Masonf1885912008-04-09 16:28:12 -04002654 uptodate = PageUptodate(page0);
Chris Masond1310b22008-01-24 16:13:08 -05002655 } else {
2656 i = 0;
2657 }
2658 for (; i < num_pages; i++, index++) {
2659 p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
2660 if (!p) {
2661 WARN_ON(1);
Chris Mason6af118c2008-07-22 11:18:07 -04002662 goto free_eb;
Chris Masond1310b22008-01-24 16:13:08 -05002663 }
2664 set_page_extent_mapped(p);
2665 mark_page_accessed(p);
2666 if (i == 0) {
2667 eb->first_page = p;
2668 set_page_extent_head(p, len);
2669 } else {
2670 set_page_private(p, EXTENT_PAGE_PRIVATE);
2671 }
2672 if (!PageUptodate(p))
2673 uptodate = 0;
2674 unlock_page(p);
2675 }
2676 if (uptodate)
2677 eb->flags |= EXTENT_UPTODATE;
2678 eb->flags |= EXTENT_BUFFER_FILLED;
2679
Chris Mason6af118c2008-07-22 11:18:07 -04002680 spin_lock(&tree->buffer_lock);
2681 exists = buffer_tree_insert(tree, start, &eb->rb_node);
2682 if (exists) {
2683 /* add one reference for the caller */
2684 atomic_inc(&exists->refs);
2685 spin_unlock(&tree->buffer_lock);
2686 goto free_eb;
2687 }
2688 spin_unlock(&tree->buffer_lock);
2689
2690 /* add one reference for the tree */
2691 atomic_inc(&eb->refs);
Chris Masond1310b22008-01-24 16:13:08 -05002692 return eb;
2693
Chris Mason6af118c2008-07-22 11:18:07 -04002694free_eb:
Chris Masond1310b22008-01-24 16:13:08 -05002695 if (!atomic_dec_and_test(&eb->refs))
Chris Mason6af118c2008-07-22 11:18:07 -04002696 return exists;
2697 for (index = 1; index < i; index++)
Chris Masond1310b22008-01-24 16:13:08 -05002698 page_cache_release(extent_buffer_page(eb, index));
Chris Mason6af118c2008-07-22 11:18:07 -04002699 page_cache_release(extent_buffer_page(eb, 0));
Chris Masond1310b22008-01-24 16:13:08 -05002700 __free_extent_buffer(eb);
Chris Mason6af118c2008-07-22 11:18:07 -04002701 return exists;
Chris Masond1310b22008-01-24 16:13:08 -05002702}
2703EXPORT_SYMBOL(alloc_extent_buffer);
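
/*
 * a minimal sketch of the extent buffer lifecycle (illustrative only):
 * look up or create the buffer, make sure its pages are read, use it,
 * then drop the reference:
 *
 *	eb = alloc_extent_buffer(tree, start, len, NULL, GFP_NOFS);
 *	if (eb) {
 *		read_extent_buffer_pages(tree, eb, 0, 1, get_extent, 0);
 *		...
 *		free_extent_buffer(eb);
 *	}
 */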
2704
2705struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
2706 u64 start, unsigned long len,
2707 gfp_t mask)
2708{
Chris Masond1310b22008-01-24 16:13:08 -05002709 struct extent_buffer *eb;
Chris Masond1310b22008-01-24 16:13:08 -05002710
Chris Mason6af118c2008-07-22 11:18:07 -04002711 spin_lock(&tree->buffer_lock);
2712 eb = buffer_search(tree, start);
2713 if (eb)
2714 atomic_inc(&eb->refs);
2715 spin_unlock(&tree->buffer_lock);
Chris Masond1310b22008-01-24 16:13:08 -05002716
Josef Bacik0f9dd462008-09-23 13:14:11 -04002717 if (eb)
2718 mark_page_accessed(eb->first_page);
2719
Chris Masond1310b22008-01-24 16:13:08 -05002720 return eb;
Chris Masond1310b22008-01-24 16:13:08 -05002721}
2722EXPORT_SYMBOL(find_extent_buffer);
2723
2724void free_extent_buffer(struct extent_buffer *eb)
2725{
Chris Masond1310b22008-01-24 16:13:08 -05002726 if (!eb)
2727 return;
2728
2729 if (!atomic_dec_and_test(&eb->refs))
2730 return;
2731
Chris Mason6af118c2008-07-22 11:18:07 -04002732 WARN_ON(1);
Chris Masond1310b22008-01-24 16:13:08 -05002733}
2734EXPORT_SYMBOL(free_extent_buffer);
2735
2736int clear_extent_buffer_dirty(struct extent_io_tree *tree,
2737 struct extent_buffer *eb)
2738{
2739 int set;
2740 unsigned long i;
2741 unsigned long num_pages;
2742 struct page *page;
2743
2744 u64 start = eb->start;
2745 u64 end = start + eb->len - 1;
2746
2747 set = clear_extent_dirty(tree, start, end, GFP_NOFS);
2748 num_pages = num_extent_pages(eb->start, eb->len);
2749
2750 for (i = 0; i < num_pages; i++) {
2751 page = extent_buffer_page(eb, i);
Chris Masona61e6f22008-07-22 11:18:08 -04002752 lock_page(page);
Chris Masond1310b22008-01-24 16:13:08 -05002753 if (i == 0)
2754 set_page_extent_head(page, eb->len);
2755 else
2756 set_page_private(page, EXTENT_PAGE_PRIVATE);
2757
		/*
		 * if we're on the first or the last page and the block
		 * isn't aligned on a page boundary, do extra checks
		 * to make sure we don't clean a page that is partially dirty
		 */
2763 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2764 ((i == num_pages - 1) &&
2765 ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
2766 start = (u64)page->index << PAGE_CACHE_SHIFT;
2767 end = start + PAGE_CACHE_SIZE - 1;
2768 if (test_range_bit(tree, start, end,
2769 EXTENT_DIRTY, 0)) {
Chris Masona61e6f22008-07-22 11:18:08 -04002770 unlock_page(page);
Chris Masond1310b22008-01-24 16:13:08 -05002771 continue;
2772 }
2773 }
2774 clear_page_dirty_for_io(page);
Sven Wegener0ee0fda2008-07-30 16:54:26 -04002775#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
2776 spin_lock_irq(&page->mapping->tree_lock);
2777#else
Chris Mason70dec802008-01-29 09:59:12 -05002778 read_lock_irq(&page->mapping->tree_lock);
Sven Wegener0ee0fda2008-07-30 16:54:26 -04002779#endif
Chris Masond1310b22008-01-24 16:13:08 -05002780 if (!PageDirty(page)) {
2781 radix_tree_tag_clear(&page->mapping->page_tree,
2782 page_index(page),
2783 PAGECACHE_TAG_DIRTY);
2784 }
Sven Wegener0ee0fda2008-07-30 16:54:26 -04002785#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
2786 spin_unlock_irq(&page->mapping->tree_lock);
2787#else
Chris Mason70dec802008-01-29 09:59:12 -05002788 read_unlock_irq(&page->mapping->tree_lock);
Sven Wegener0ee0fda2008-07-30 16:54:26 -04002789#endif
Chris Masona61e6f22008-07-22 11:18:08 -04002790 unlock_page(page);
Chris Masond1310b22008-01-24 16:13:08 -05002791 }
2792 return 0;
2793}
2794EXPORT_SYMBOL(clear_extent_buffer_dirty);
2795
2796int wait_on_extent_buffer_writeback(struct extent_io_tree *tree,
2797 struct extent_buffer *eb)
2798{
2799 return wait_on_extent_writeback(tree, eb->start,
2800 eb->start + eb->len - 1);
2801}
2802EXPORT_SYMBOL(wait_on_extent_buffer_writeback);
2803
2804int set_extent_buffer_dirty(struct extent_io_tree *tree,
2805 struct extent_buffer *eb)
2806{
2807 unsigned long i;
2808 unsigned long num_pages;
2809
2810 num_pages = num_extent_pages(eb->start, eb->len);
2811 for (i = 0; i < num_pages; i++) {
2812 struct page *page = extent_buffer_page(eb, i);
		/* writepage may need to do something special for the
		 * first page, so we have to make sure page->private is
		 * properly set.  releasepage may drop page->private
		 * on us if the page isn't already dirty.
		 */
Chris Masona1b32a52008-09-05 16:09:51 -04002818 lock_page(page);
Chris Masond1310b22008-01-24 16:13:08 -05002819 if (i == 0) {
Chris Masond1310b22008-01-24 16:13:08 -05002820 set_page_extent_head(page, eb->len);
2821 } else if (PagePrivate(page) &&
2822 page->private != EXTENT_PAGE_PRIVATE) {
Chris Masond1310b22008-01-24 16:13:08 -05002823 set_page_extent_mapped(page);
Chris Masond1310b22008-01-24 16:13:08 -05002824 }
2825 __set_page_dirty_nobuffers(extent_buffer_page(eb, i));
Chris Masona1b32a52008-09-05 16:09:51 -04002826 set_extent_dirty(tree, page_offset(page),
2827 page_offset(page) + PAGE_CACHE_SIZE -1,
2828 GFP_NOFS);
2829 unlock_page(page);
Chris Masond1310b22008-01-24 16:13:08 -05002830 }
Chris Masona1b32a52008-09-05 16:09:51 -04002831 return 0;
Chris Masond1310b22008-01-24 16:13:08 -05002832}
2833EXPORT_SYMBOL(set_extent_buffer_dirty);
2834
Chris Mason1259ab72008-05-12 13:39:03 -04002835int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
2836 struct extent_buffer *eb)
2837{
2838 unsigned long i;
2839 struct page *page;
2840 unsigned long num_pages;
2841
2842 num_pages = num_extent_pages(eb->start, eb->len);
2843 eb->flags &= ~EXTENT_UPTODATE;
2844
2845 clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
2846 GFP_NOFS);
2847 for (i = 0; i < num_pages; i++) {
2848 page = extent_buffer_page(eb, i);
Chris Mason33958dc2008-07-30 10:29:12 -04002849 if (page)
2850 ClearPageUptodate(page);
Chris Mason1259ab72008-05-12 13:39:03 -04002851 }
2852 return 0;
2853}
2854
Chris Masond1310b22008-01-24 16:13:08 -05002855int set_extent_buffer_uptodate(struct extent_io_tree *tree,
2856 struct extent_buffer *eb)
2857{
2858 unsigned long i;
2859 struct page *page;
2860 unsigned long num_pages;
2861
2862 num_pages = num_extent_pages(eb->start, eb->len);
2863
2864 set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
2865 GFP_NOFS);
2866 for (i = 0; i < num_pages; i++) {
2867 page = extent_buffer_page(eb, i);
2868 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2869 ((i == num_pages - 1) &&
2870 ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
2871 check_page_uptodate(tree, page);
2872 continue;
2873 }
2874 SetPageUptodate(page);
2875 }
2876 return 0;
2877}
2878EXPORT_SYMBOL(set_extent_buffer_uptodate);
2879
Chris Masonce9adaa2008-04-09 16:28:12 -04002880int extent_range_uptodate(struct extent_io_tree *tree,
2881 u64 start, u64 end)
2882{
2883 struct page *page;
2884 int ret;
2885 int pg_uptodate = 1;
2886 int uptodate;
2887 unsigned long index;
2888
2889 ret = test_range_bit(tree, start, end, EXTENT_UPTODATE, 1);
2890 if (ret)
2891 return 1;
2892 while(start <= end) {
2893 index = start >> PAGE_CACHE_SHIFT;
		page = find_get_page(tree->mapping, index);
		if (!page) {
			pg_uptodate = 0;
			break;
		}
		uptodate = PageUptodate(page);
		page_cache_release(page);
2897 if (!uptodate) {
2898 pg_uptodate = 0;
2899 break;
2900 }
2901 start += PAGE_CACHE_SIZE;
2902 }
2903 return pg_uptodate;
2904}
2905
Chris Masond1310b22008-01-24 16:13:08 -05002906int extent_buffer_uptodate(struct extent_io_tree *tree,
Chris Masonce9adaa2008-04-09 16:28:12 -04002907 struct extent_buffer *eb)
Chris Masond1310b22008-01-24 16:13:08 -05002908{
Chris Mason728131d2008-04-09 16:28:12 -04002909 int ret = 0;
Chris Masonce9adaa2008-04-09 16:28:12 -04002910 unsigned long num_pages;
2911 unsigned long i;
Chris Mason728131d2008-04-09 16:28:12 -04002912 struct page *page;
2913 int pg_uptodate = 1;
2914
Chris Masond1310b22008-01-24 16:13:08 -05002915 if (eb->flags & EXTENT_UPTODATE)
Chris Mason42352982008-04-28 16:40:52 -04002916 return 1;
Chris Mason728131d2008-04-09 16:28:12 -04002917
Chris Mason42352982008-04-28 16:40:52 -04002918 ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
Chris Masond1310b22008-01-24 16:13:08 -05002919 EXTENT_UPTODATE, 1);
Chris Mason42352982008-04-28 16:40:52 -04002920 if (ret)
2921 return ret;
Chris Mason728131d2008-04-09 16:28:12 -04002922
2923 num_pages = num_extent_pages(eb->start, eb->len);
2924 for (i = 0; i < num_pages; i++) {
2925 page = extent_buffer_page(eb, i);
2926 if (!PageUptodate(page)) {
2927 pg_uptodate = 0;
2928 break;
2929 }
2930 }
Chris Mason42352982008-04-28 16:40:52 -04002931 return pg_uptodate;
Chris Masond1310b22008-01-24 16:13:08 -05002932}
2933EXPORT_SYMBOL(extent_buffer_uptodate);
2934
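/*
 * read in the pages of an extent buffer: every page is locked up front
 * (trylock when called without wait), reads are issued only for pages
 * that aren't uptodate, and when wait is set we block until they all
 * complete
 */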
2935int read_extent_buffer_pages(struct extent_io_tree *tree,
2936 struct extent_buffer *eb,
Chris Masona86c12c2008-02-07 10:50:54 -05002937 u64 start, int wait,
Chris Masonf1885912008-04-09 16:28:12 -04002938 get_extent_t *get_extent, int mirror_num)
Chris Masond1310b22008-01-24 16:13:08 -05002939{
2940 unsigned long i;
2941 unsigned long start_i;
2942 struct page *page;
2943 int err;
2944 int ret = 0;
Chris Masonce9adaa2008-04-09 16:28:12 -04002945 int locked_pages = 0;
2946 int all_uptodate = 1;
2947 int inc_all_pages = 0;
Chris Masond1310b22008-01-24 16:13:08 -05002948 unsigned long num_pages;
Chris Masona86c12c2008-02-07 10:50:54 -05002949 struct bio *bio = NULL;
2950
Chris Masond1310b22008-01-24 16:13:08 -05002951 if (eb->flags & EXTENT_UPTODATE)
2952 return 0;
2953
Chris Masonce9adaa2008-04-09 16:28:12 -04002954 if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
Chris Masond1310b22008-01-24 16:13:08 -05002955 EXTENT_UPTODATE, 1)) {
2956 return 0;
2957 }
2958
2959 if (start) {
2960 WARN_ON(start < eb->start);
2961 start_i = (start >> PAGE_CACHE_SHIFT) -
2962 (eb->start >> PAGE_CACHE_SHIFT);
2963 } else {
2964 start_i = 0;
2965 }
2966
2967 num_pages = num_extent_pages(eb->start, eb->len);
2968 for (i = start_i; i < num_pages; i++) {
2969 page = extent_buffer_page(eb, i);
Chris Masond1310b22008-01-24 16:13:08 -05002970 if (!wait) {
David Woodhouse2db04962008-08-07 11:19:43 -04002971 if (!trylock_page(page))
Chris Masonce9adaa2008-04-09 16:28:12 -04002972 goto unlock_exit;
Chris Masond1310b22008-01-24 16:13:08 -05002973 } else {
2974 lock_page(page);
2975 }
Chris Masonce9adaa2008-04-09 16:28:12 -04002976 locked_pages++;
Chris Masond1310b22008-01-24 16:13:08 -05002977 if (!PageUptodate(page)) {
Chris Masonce9adaa2008-04-09 16:28:12 -04002978 all_uptodate = 0;
2979 }
2980 }
2981 if (all_uptodate) {
2982 if (start_i == 0)
2983 eb->flags |= EXTENT_UPTODATE;
Chris Masona1b32a52008-09-05 16:09:51 -04002984 if (ret) {
2985 printk("all up to date but ret is %d\n", ret);
2986 }
Chris Masonce9adaa2008-04-09 16:28:12 -04002987 goto unlock_exit;
2988 }
2989
2990 for (i = start_i; i < num_pages; i++) {
2991 page = extent_buffer_page(eb, i);
2992 if (inc_all_pages)
2993 page_cache_get(page);
2994 if (!PageUptodate(page)) {
2995 if (start_i == 0)
2996 inc_all_pages = 1;
Chris Masonf1885912008-04-09 16:28:12 -04002997 ClearPageError(page);
Chris Masona86c12c2008-02-07 10:50:54 -05002998 err = __extent_read_full_page(tree, page,
Chris Masonf1885912008-04-09 16:28:12 -04002999 get_extent, &bio,
3000 mirror_num);
Chris Masond1310b22008-01-24 16:13:08 -05003001 if (err) {
3002 ret = err;
Chris Masona1b32a52008-09-05 16:09:51 -04003003 printk("err %d from __extent_read_full_page\n", ret);
Chris Masond1310b22008-01-24 16:13:08 -05003004 }
3005 } else {
3006 unlock_page(page);
3007 }
3008 }
3009
Chris Masona86c12c2008-02-07 10:50:54 -05003010 if (bio)
Chris Masonf1885912008-04-09 16:28:12 -04003011 submit_one_bio(READ, bio, mirror_num);
Chris Masona86c12c2008-02-07 10:50:54 -05003012
Chris Masond1310b22008-01-24 16:13:08 -05003013 if (ret || !wait) {
Chris Masona1b32a52008-09-05 16:09:51 -04003014 if (ret)
3015 printk("ret %d wait %d returning\n", ret, wait);
Chris Masond1310b22008-01-24 16:13:08 -05003016 return ret;
3017 }
Chris Masond1310b22008-01-24 16:13:08 -05003018 for (i = start_i; i < num_pages; i++) {
3019 page = extent_buffer_page(eb, i);
3020 wait_on_page_locked(page);
3021 if (!PageUptodate(page)) {
Chris Masona1b32a52008-09-05 16:09:51 -04003022 printk("page not uptodate after wait_on_page_locked\n");
Chris Masond1310b22008-01-24 16:13:08 -05003023 ret = -EIO;
3024 }
3025 }
3026 if (!ret)
3027 eb->flags |= EXTENT_UPTODATE;
3028 return ret;
Chris Masonce9adaa2008-04-09 16:28:12 -04003029
3030unlock_exit:
3031 i = start_i;
3032 while(locked_pages > 0) {
3033 page = extent_buffer_page(eb, i);
3034 i++;
3035 unlock_page(page);
3036 locked_pages--;
3037 }
3038 return ret;
Chris Masond1310b22008-01-24 16:13:08 -05003039}
3040EXPORT_SYMBOL(read_extent_buffer_pages);
3041
3042void read_extent_buffer(struct extent_buffer *eb, void *dstv,
3043 unsigned long start,
3044 unsigned long len)
3045{
3046 size_t cur;
3047 size_t offset;
3048 struct page *page;
3049 char *kaddr;
3050 char *dst = (char *)dstv;
3051 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3052 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
Chris Masond1310b22008-01-24 16:13:08 -05003053
3054 WARN_ON(start > eb->len);
3055 WARN_ON(start + len > eb->start + eb->len);
3056
3057 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3058
3059 while(len > 0) {
3060 page = extent_buffer_page(eb, i);
Chris Masond1310b22008-01-24 16:13:08 -05003061
3062 cur = min(len, (PAGE_CACHE_SIZE - offset));
3063 kaddr = kmap_atomic(page, KM_USER1);
3064 memcpy(dst, kaddr + offset, cur);
3065 kunmap_atomic(kaddr, KM_USER1);
3066
3067 dst += cur;
3068 len -= cur;
3069 offset = 0;
3070 i++;
3071 }
3072}
3073EXPORT_SYMBOL(read_extent_buffer);
3074
3075int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
3076 unsigned long min_len, char **token, char **map,
3077 unsigned long *map_start,
3078 unsigned long *map_len, int km)
3079{
3080 size_t offset = start & (PAGE_CACHE_SIZE - 1);
3081 char *kaddr;
3082 struct page *p;
3083 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3084 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3085 unsigned long end_i = (start_offset + start + min_len - 1) >>
3086 PAGE_CACHE_SHIFT;
3087
3088 if (i != end_i)
3089 return -EINVAL;
3090
3091 if (i == 0) {
3092 offset = start_offset;
3093 *map_start = 0;
3094 } else {
3095 offset = 0;
3096 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
3097 }
3098 if (start + min_len > eb->len) {
3099printk("bad mapping eb start %Lu len %lu, wanted %lu %lu\n", eb->start, eb->len, start, min_len);
3100 WARN_ON(1);
3101 }
3102
3103 p = extent_buffer_page(eb, i);
Chris Masond1310b22008-01-24 16:13:08 -05003104 kaddr = kmap_atomic(p, km);
3105 *token = kaddr;
3106 *map = kaddr + offset;
3107 *map_len = PAGE_CACHE_SIZE - offset;
3108 return 0;
3109}
3110EXPORT_SYMBOL(map_private_extent_buffer);
3111
3112int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
3113 unsigned long min_len,
3114 char **token, char **map,
3115 unsigned long *map_start,
3116 unsigned long *map_len, int km)
3117{
3118 int err;
3119 int save = 0;
3120 if (eb->map_token) {
3121 unmap_extent_buffer(eb, eb->map_token, km);
3122 eb->map_token = NULL;
3123 save = 1;
3124 }
3125 err = map_private_extent_buffer(eb, start, min_len, token, map,
3126 map_start, map_len, km);
3127 if (!err && save) {
3128 eb->map_token = *token;
3129 eb->kaddr = *map;
3130 eb->map_start = *map_start;
3131 eb->map_len = *map_len;
3132 }
3133 return err;
3134}
3135EXPORT_SYMBOL(map_extent_buffer);
3136
3137void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
3138{
3139 kunmap_atomic(token, km);
3140}
3141EXPORT_SYMBOL(unmap_extent_buffer);
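
/*
 * a minimal sketch of the map/unmap pairing (illustrative only): map a
 * small range that stays inside one page, use it, then drop the atomic
 * mapping:
 *
 *	char *token;
 *	char *kaddr;
 *	unsigned long map_start;
 *	unsigned long map_len;
 *
 *	if (!map_private_extent_buffer(eb, offset, sizeof(u64), &token,
 *				       &kaddr, &map_start, &map_len,
 *				       KM_USER0)) {
 *		... read up to map_len bytes at kaddr ...
 *		unmap_extent_buffer(eb, token, KM_USER0);
 *	}
 */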
3142
3143int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
3144 unsigned long start,
3145 unsigned long len)
3146{
3147 size_t cur;
3148 size_t offset;
3149 struct page *page;
3150 char *kaddr;
3151 char *ptr = (char *)ptrv;
3152 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3153 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3154 int ret = 0;
3155
3156 WARN_ON(start > eb->len);
3157 WARN_ON(start + len > eb->start + eb->len);
3158
3159 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3160
3161 while(len > 0) {
3162 page = extent_buffer_page(eb, i);
Chris Masond1310b22008-01-24 16:13:08 -05003163
3164 cur = min(len, (PAGE_CACHE_SIZE - offset));
3165
3166 kaddr = kmap_atomic(page, KM_USER0);
3167 ret = memcmp(ptr, kaddr + offset, cur);
3168 kunmap_atomic(kaddr, KM_USER0);
3169 if (ret)
3170 break;
3171
3172 ptr += cur;
3173 len -= cur;
3174 offset = 0;
3175 i++;
3176 }
3177 return ret;
3178}
3179EXPORT_SYMBOL(memcmp_extent_buffer);
3180
3181void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
3182 unsigned long start, unsigned long len)
3183{
3184 size_t cur;
3185 size_t offset;
3186 struct page *page;
3187 char *kaddr;
3188 char *src = (char *)srcv;
3189 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3190 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3191
3192 WARN_ON(start > eb->len);
3193 WARN_ON(start + len > eb->start + eb->len);
3194
3195 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3196
3197 while(len > 0) {
3198 page = extent_buffer_page(eb, i);
3199 WARN_ON(!PageUptodate(page));
3200
3201 cur = min(len, PAGE_CACHE_SIZE - offset);
3202 kaddr = kmap_atomic(page, KM_USER1);
3203 memcpy(kaddr + offset, src, cur);
3204 kunmap_atomic(kaddr, KM_USER1);
3205
3206 src += cur;
3207 len -= cur;
3208 offset = 0;
3209 i++;
3210 }
3211}
3212EXPORT_SYMBOL(write_extent_buffer);
3213
3214void memset_extent_buffer(struct extent_buffer *eb, char c,
3215 unsigned long start, unsigned long len)
3216{
3217 size_t cur;
3218 size_t offset;
3219 struct page *page;
3220 char *kaddr;
3221 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3222 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3223
3224 WARN_ON(start > eb->len);
3225 WARN_ON(start + len > eb->start + eb->len);
3226
3227 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3228
3229 while(len > 0) {
3230 page = extent_buffer_page(eb, i);
3231 WARN_ON(!PageUptodate(page));
3232
3233 cur = min(len, PAGE_CACHE_SIZE - offset);
3234 kaddr = kmap_atomic(page, KM_USER0);
3235 memset(kaddr + offset, c, cur);
3236 kunmap_atomic(kaddr, KM_USER0);
3237
3238 len -= cur;
3239 offset = 0;
3240 i++;
3241 }
3242}
3243EXPORT_SYMBOL(memset_extent_buffer);
3244
3245void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
3246 unsigned long dst_offset, unsigned long src_offset,
3247 unsigned long len)
3248{
3249 u64 dst_len = dst->len;
3250 size_t cur;
3251 size_t offset;
3252 struct page *page;
3253 char *kaddr;
3254 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3255 unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3256
3257 WARN_ON(src->len != dst_len);
3258
3259 offset = (start_offset + dst_offset) &
3260 ((unsigned long)PAGE_CACHE_SIZE - 1);
3261
3262 while(len > 0) {
3263 page = extent_buffer_page(dst, i);
3264 WARN_ON(!PageUptodate(page));
3265
3266 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
3267
3268 kaddr = kmap_atomic(page, KM_USER0);
3269 read_extent_buffer(src, kaddr + offset, src_offset, cur);
3270 kunmap_atomic(kaddr, KM_USER0);
3271
3272 src_offset += cur;
3273 len -= cur;
3274 offset = 0;
3275 i++;
3276 }
3277}
3278EXPORT_SYMBOL(copy_extent_buffer);
3279
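/*
 * copy from the end of the range back toward the start, the way memmove
 * does, so an overlapping move within a single page stays correct when
 * dst is above src; memmove_extent_buffer only takes this path for the
 * dst > src case and falls back to memcpy_extent_buffer otherwise
 */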
static void move_pages(struct page *dst_page, struct page *src_page,
		       unsigned long dst_off, unsigned long src_off,
		       unsigned long len)
{
	char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
	if (dst_page == src_page) {
		memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
	} else {
		char *src_kaddr = kmap_atomic(src_page, KM_USER1);
		char *p = dst_kaddr + dst_off + len;
		char *s = src_kaddr + src_off + len;

		while (len--)
			*--p = *--s;

		kunmap_atomic(src_kaddr, KM_USER1);
	}
	kunmap_atomic(dst_kaddr, KM_USER0);
}

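/*
 * copy_pages is the memcpy-style counterpart of move_pages: it maps the
 * two pages (reusing one mapping when they are the same page) and does a
 * single forward memcpy.  Callers are expected to ensure the ranges never
 * overlap in a way a forward copy would corrupt; overlapping moves go
 * through move_pages instead.
 */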
static void copy_pages(struct page *dst_page, struct page *src_page,
		       unsigned long dst_off, unsigned long src_off,
		       unsigned long len)
{
	char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
	char *src_kaddr;

	if (dst_page != src_page)
		src_kaddr = kmap_atomic(src_page, KM_USER1);
	else
		src_kaddr = dst_kaddr;

	memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
	kunmap_atomic(dst_kaddr, KM_USER0);
	if (dst_page != src_page)
		kunmap_atomic(src_kaddr, KM_USER1);
}

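/*
 * memcpy_extent_buffer copies @len bytes within one extent buffer, from
 * @src_offset to @dst_offset, splitting the copy wherever either range
 * crosses a page boundary; an out-of-range request is a fatal programming
 * error.
 *
 * A worked sketch of the splitting (values are illustrative and assume
 * 4K pages with the buffer starting on a page boundary): copying 100
 * bytes from offset 4090 to offset 10 is done as a 6-byte chunk (the tail
 * of the source's first page) followed by a 94-byte chunk from the next
 * page, since cur is clamped to whichever of the two in-page remainders
 * is smaller.
 */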
void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			  unsigned long src_offset, unsigned long len)
{
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long dst_i;
	unsigned long src_i;

	if (src_offset + len > dst->len) {
		printk(KERN_ERR "memcpy bogus src_offset %lu move len %lu "
		       "len %lu\n", src_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset + len > dst->len) {
		printk(KERN_ERR "memcpy bogus dst_offset %lu move len %lu "
		       "len %lu\n", dst_offset, len, dst->len);
		BUG_ON(1);
	}

	while (len > 0) {
		dst_off_in_page = (start_offset + dst_offset) &
			((unsigned long)PAGE_CACHE_SIZE - 1);
		src_off_in_page = (start_offset + src_offset) &
			((unsigned long)PAGE_CACHE_SIZE - 1);

		dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
		src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;

		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
					       src_off_in_page));
		cur = min_t(unsigned long, cur,
			    (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));

		copy_pages(extent_buffer_page(dst, dst_i),
			   extent_buffer_page(dst, src_i),
			   dst_off_in_page, src_off_in_page, cur);

		src_offset += cur;
		dst_offset += cur;
		len -= cur;
	}
}
EXPORT_SYMBOL(memcpy_extent_buffer);

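/*
 * memmove_extent_buffer is the overlap-safe variant of
 * memcpy_extent_buffer.  When the destination starts below the source, a
 * forward copy is safe even if the ranges overlap, so the call is simply
 * forwarded to memcpy_extent_buffer; otherwise the loop walks from the
 * last byte backwards so a destination range overlapping the tail of the
 * source is never clobbered before it is read.
 *
 * This is the kind of call the btree code relies on when shifting item
 * data inside a node, e.g. opening a gap before an insert (an
 * illustrative sketch with made-up variable names, not a quote from a
 * real caller):
 *
 *	memmove_extent_buffer(leaf, slot_start + sizeof(struct btrfs_item),
 *			      slot_start, bytes_after_slot);
 */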
void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len)
{
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	unsigned long dst_end = dst_offset + len - 1;
	unsigned long src_end = src_offset + len - 1;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long dst_i;
	unsigned long src_i;

	if (src_offset + len > dst->len) {
		printk(KERN_ERR "memmove bogus src_offset %lu move len %lu "
		       "len %lu\n", src_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset + len > dst->len) {
		printk(KERN_ERR "memmove bogus dst_offset %lu move len %lu "
		       "len %lu\n", dst_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset < src_offset) {
		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
		return;
	}
	while (len > 0) {
		dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
		src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;

		dst_off_in_page = (start_offset + dst_end) &
			((unsigned long)PAGE_CACHE_SIZE - 1);
		src_off_in_page = (start_offset + src_end) &
			((unsigned long)PAGE_CACHE_SIZE - 1);

		cur = min_t(unsigned long, len, src_off_in_page + 1);
		cur = min(cur, dst_off_in_page + 1);
		move_pages(extent_buffer_page(dst, dst_i),
			   extent_buffer_page(dst, src_i),
			   dst_off_in_page - cur + 1,
			   src_off_in_page - cur + 1, cur);

		dst_end -= cur;
		src_end -= cur;
		len -= cur;
	}
}
EXPORT_SYMBOL(memmove_extent_buffer);

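/*
 * try_release_extent_buffer drops the extent buffer covering @page, if
 * any, provided nobody else holds a reference to it.  It returns 1 when
 * the buffer was freed (or no buffer existed) and 0 when the buffer is
 * still in use, matching the usual releasepage convention; presumably the
 * btree address_space operations call this from their releasepage hook.
 */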
int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
{
	u64 start = page_offset(page);
	struct extent_buffer *eb;
	int ret = 1;
	unsigned long i;
	unsigned long num_pages;

	spin_lock(&tree->buffer_lock);
	eb = buffer_search(tree, start);
	if (!eb)
		goto out;

	if (atomic_read(&eb->refs) > 1) {
		ret = 0;
		goto out;
	}
	/* at this point we can safely release the extent buffer */
	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = 0; i < num_pages; i++)
		page_cache_release(extent_buffer_page(eb, i));
	rb_erase(&eb->rb_node, &tree->buffer);
	__free_extent_buffer(eb);
out:
	spin_unlock(&tree->buffer_lock);
	return ret;
}
EXPORT_SYMBOL(try_release_extent_buffer);