#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "extent_io.h"
#include "extent_map.h"
#include "compat.h"
#include "ctree.h"
#include "btrfs_inode.h"

static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;

static LIST_HEAD(buffers);
static LIST_HEAD(states);

#define LEAK_DEBUG 0
#if LEAK_DEBUG
static DEFINE_SPINLOCK(leak_lock);
#endif

#define BUFFER_LRU_MAX 64

struct tree_entry {
	u64 start;
	u64 end;
	struct rb_node rb_node;
};

struct extent_page_data {
	struct bio *bio;
	struct extent_io_tree *tree;
	get_extent_t *get_extent;

	/* tells writepage not to lock the state bits for this range
	 * it still does the unlocking
	 */
	unsigned int extent_locked:1;

	/* tells the submit_bio code to use a WRITE_SYNC */
	unsigned int sync_io:1;
};

int __init extent_io_init(void)
{
	extent_state_cache = kmem_cache_create("extent_state",
			sizeof(struct extent_state), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!extent_state_cache)
		return -ENOMEM;

	extent_buffer_cache = kmem_cache_create("extent_buffers",
			sizeof(struct extent_buffer), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!extent_buffer_cache)
		goto free_state_cache;
	return 0;

free_state_cache:
	kmem_cache_destroy(extent_state_cache);
	return -ENOMEM;
}

void extent_io_exit(void)
{
	struct extent_state *state;
	struct extent_buffer *eb;

	while (!list_empty(&states)) {
		state = list_entry(states.next, struct extent_state, leak_list);
		printk(KERN_ERR "btrfs state leak: start %llu end %llu "
		       "state %lu in tree %p refs %d\n",
		       (unsigned long long)state->start,
		       (unsigned long long)state->end,
		       state->state, state->tree, atomic_read(&state->refs));
		list_del(&state->leak_list);
		kmem_cache_free(extent_state_cache, state);
	}

	while (!list_empty(&buffers)) {
		eb = list_entry(buffers.next, struct extent_buffer, leak_list);
		printk(KERN_ERR "btrfs buffer leak start %llu len %lu "
		       "refs %d\n", (unsigned long long)eb->start,
		       eb->len, atomic_read(&eb->refs));
		list_del(&eb->leak_list);
		kmem_cache_free(extent_buffer_cache, eb);
	}
	if (extent_state_cache)
		kmem_cache_destroy(extent_state_cache);
	if (extent_buffer_cache)
		kmem_cache_destroy(extent_buffer_cache);
}

void extent_io_tree_init(struct extent_io_tree *tree,
			 struct address_space *mapping, gfp_t mask)
{
	tree->state = RB_ROOT;
	tree->buffer = RB_ROOT;
	tree->ops = NULL;
	tree->dirty_bytes = 0;
	spin_lock_init(&tree->lock);
	spin_lock_init(&tree->buffer_lock);
	tree->mapping = mapping;
}

static struct extent_state *alloc_extent_state(gfp_t mask)
{
	struct extent_state *state;
#if LEAK_DEBUG
	unsigned long flags;
#endif

	state = kmem_cache_alloc(extent_state_cache, mask);
	if (!state)
		return state;
	state->state = 0;
	state->private = 0;
	state->tree = NULL;
#if LEAK_DEBUG
	spin_lock_irqsave(&leak_lock, flags);
	list_add(&state->leak_list, &states);
	spin_unlock_irqrestore(&leak_lock, flags);
#endif
	atomic_set(&state->refs, 1);
	init_waitqueue_head(&state->wq);
	return state;
}

static void free_extent_state(struct extent_state *state)
{
	if (!state)
		return;
	if (atomic_dec_and_test(&state->refs)) {
#if LEAK_DEBUG
		unsigned long flags;
#endif
		WARN_ON(state->tree);
#if LEAK_DEBUG
		spin_lock_irqsave(&leak_lock, flags);
		list_del(&state->leak_list);
		spin_unlock_irqrestore(&leak_lock, flags);
#endif
		kmem_cache_free(extent_state_cache, state);
	}
}

static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset > entry->end)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	entry = rb_entry(node, struct tree_entry, rb_node);
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
				      struct rb_node **prev_ret,
				      struct rb_node **next_ret)
{
	struct rb_root *root = &tree->state;
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct tree_entry *entry;
	struct tree_entry *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct tree_entry, rb_node);
		prev = n;
		prev_entry = entry;

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset > entry->end)
			n = n->rb_right;
		else
			return n;
	}

	if (prev_ret) {
		orig_prev = prev;
		while (prev && offset > prev_entry->end) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

	if (next_ret) {
		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*next_ret = prev;
	}
	return NULL;
}

static inline struct rb_node *tree_search(struct extent_io_tree *tree,
					  u64 offset)
{
	struct rb_node *prev = NULL;
	struct rb_node *ret;

	ret = __etree_search(tree, offset, &prev, NULL);
	if (!ret)
		return prev;
	return ret;
}
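
/*
 * Note on tree_search() semantics, with a minimal caller sketch
 * (hypothetical, for illustration only): it returns the node whose
 * [start, end] contains 'offset' when one exists, and otherwise the
 * first node that ends at or after 'offset', so callers still have to
 * bounds-check whatever comes back:
 *
 *	node = tree_search(tree, start);
 *	if (node) {
 *		state = rb_entry(node, struct extent_state, rb_node);
 *		if (state->start > end)
 *			goto out;	(nearest state is past our range)
 *	}
 */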

static struct extent_buffer *buffer_tree_insert(struct extent_io_tree *tree,
						u64 offset, struct rb_node *node)
{
	struct rb_root *root = &tree->buffer;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct extent_buffer *eb;

	while (*p) {
		parent = *p;
		eb = rb_entry(parent, struct extent_buffer, rb_node);

		if (offset < eb->start)
			p = &(*p)->rb_left;
		else if (offset > eb->start)
			p = &(*p)->rb_right;
		else
			return eb;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

static struct extent_buffer *buffer_search(struct extent_io_tree *tree,
					   u64 offset)
{
	struct rb_root *root = &tree->buffer;
	struct rb_node *n = root->rb_node;
	struct extent_buffer *eb;

	while (n) {
		eb = rb_entry(n, struct extent_buffer, rb_node);
		if (offset < eb->start)
			n = n->rb_left;
		else if (offset > eb->start)
			n = n->rb_right;
		else
			return eb;
	}
	return NULL;
}

static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
		     struct extent_state *other)
{
	if (tree->ops && tree->ops->merge_extent_hook)
		tree->ops->merge_extent_hook(tree->mapping->host, new,
					     other);
}

/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree.  Extents with EXTENT_IOBITS in their state field
 * are not merged because the end_io handlers need to be able to do
 * operations on them without sleeping (or doing allocations/splits).
 *
 * This should be called with the tree lock held.
 */
static int merge_state(struct extent_io_tree *tree,
		       struct extent_state *state)
{
	struct extent_state *other;
	struct rb_node *other_node;

	if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
		return 0;

	other_node = rb_prev(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->end == state->start - 1 &&
		    other->state == state->state) {
			merge_cb(tree, state, other);
			state->start = other->start;
			other->tree = NULL;
			rb_erase(&other->rb_node, &tree->state);
			free_extent_state(other);
		}
	}
	other_node = rb_next(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->start == state->end + 1 &&
		    other->state == state->state) {
			merge_cb(tree, state, other);
			other->start = state->start;
			state->tree = NULL;
			rb_erase(&state->rb_node, &tree->state);
			free_extent_state(state);
			state = NULL;
		}
	}

	return 0;
}
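
/*
 * Worked example for merge_state(), with made-up values: given [0, 4095]
 * and [4096, 8191] both carrying exactly EXTENT_DIRTY, the two records
 * collapse into a single [0, 8191] state and the leftover struct is
 * freed.  States holding EXTENT_IOBITS or EXTENT_BOUNDARY are rejected
 * by the check at the top and always stay separate, since end_io
 * handlers may still be working on them.
 */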

static int set_state_cb(struct extent_io_tree *tree,
			struct extent_state *state, int *bits)
{
	if (tree->ops && tree->ops->set_bit_hook) {
		return tree->ops->set_bit_hook(tree->mapping->host,
					       state, bits);
	}

	return 0;
}

static void clear_state_cb(struct extent_io_tree *tree,
			   struct extent_state *state, int *bits)
{
	if (tree->ops && tree->ops->clear_bit_hook)
		tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
}

/*
 * insert an extent_state struct into the tree.  'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case the
 * state struct is freed.
 *
 * The tree lock is not taken internally.  This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_io_tree *tree,
			struct extent_state *state, u64 start, u64 end,
			int *bits)
{
	struct rb_node *node;
	int bits_to_set = *bits & ~EXTENT_CTLBITS;
	int ret;

	if (end < start) {
		printk(KERN_ERR "btrfs end < start %llu %llu\n",
		       (unsigned long long)end,
		       (unsigned long long)start);
		WARN_ON(1);
	}
	state->start = start;
	state->end = end;
	ret = set_state_cb(tree, state, bits);
	if (ret)
		return ret;

	if (bits_to_set & EXTENT_DIRTY)
		tree->dirty_bytes += end - start + 1;
	state->state |= bits_to_set;
	node = tree_insert(&tree->state, end, &state->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk(KERN_ERR "btrfs found node %llu %llu on insert of "
		       "%llu %llu\n", (unsigned long long)found->start,
		       (unsigned long long)found->end,
		       (unsigned long long)start, (unsigned long long)end);
		free_extent_state(state);
		return -EEXIST;
	}
	state->tree = tree;
	merge_state(tree, state);
	return 0;
}
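
/*
 * Hedged example for insert_state() (values made up): inserting a new
 * state for [0, 4095] with *bits == EXTENT_DELALLOC puts one record in
 * the tree, indexed by its end offset, and merge_state() may then fuse
 * it with identical neighbours.  Inserting [0, 4095] a second time
 * returns -EEXIST and frees the passed-in state, so callers must not
 * touch it after a failure.
 */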

static int split_cb(struct extent_io_tree *tree, struct extent_state *orig,
		    u64 split)
{
	if (tree->ops && tree->ops->split_extent_hook)
		return tree->ops->split_extent_hook(tree->mapping->host,
						    orig, split);
	return 0;
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created second half.  'split' indicates an
 * offset inside 'orig' where it should be split.
 *
 * Before calling,
 * the tree has 'orig' at [orig->start, orig->end].  After calling, there
 * are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig: [ split, orig->end ]
 *
 * The tree locks are not taken by this function.  They need to be held
 * by the caller.
 */
static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
		       struct extent_state *prealloc, u64 split)
{
	struct rb_node *node;

	split_cb(tree, orig, split);

	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->state = orig->state;
	orig->start = split;

	node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
	if (node) {
		free_extent_state(prealloc);
		return -EEXIST;
	}
	prealloc->tree = tree;
	return 0;
}

/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up any one waiting on this state (wake == 1);
 * a delete request from clear_extent_bit() arrives here as a widened
 * 'bits' mask that clears everything.
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree.
 */
static int clear_state_bit(struct extent_io_tree *tree,
			   struct extent_state *state,
			   int *bits, int wake)
{
	int bits_to_clear = *bits & ~EXTENT_CTLBITS;
	int ret = state->state & bits_to_clear;

	if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		WARN_ON(range > tree->dirty_bytes);
		tree->dirty_bytes -= range;
	}
	clear_state_cb(tree, state, bits);
	state->state &= ~bits_to_clear;
	if (wake)
		wake_up(&state->wq);
	if (state->state == 0) {
		if (state->tree) {
			rb_erase(&state->rb_node, &tree->state);
			state->tree = NULL;
			free_extent_state(state);
		} else {
			WARN_ON(1);
		}
	} else {
		merge_state(tree, state);
	}
	return ret;
}
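
/*
 * Worked example for clear_state_bit(), values made up: a state at
 * [0, 4095] carrying EXTENT_DIRTY | EXTENT_UPTODATE, cleared with a
 * mask of EXTENT_DIRTY, drops 4096 from tree->dirty_bytes, keeps the
 * state in the tree with only EXTENT_UPTODATE set, and returns the
 * EXTENT_DIRTY bit to signal that something was actually cleared.  Only
 * when the mask removes every remaining bit is the state erased and
 * freed.
 */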

/*
 * clear some bits on a range in the tree.  This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (ie for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
 * bits were already set, or zero if none of the bits were already set.
 */
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		     int bits, int wake, int delete,
		     struct extent_state **cached_state,
		     gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *cached;
	struct extent_state *prealloc = NULL;
	struct rb_node *next_node;
	struct rb_node *node;
	u64 last_end;
	int err;
	int set = 0;
	int clear = 0;

	if (delete)
		bits |= ~EXTENT_CTLBITS;
	bits |= EXTENT_FIRST_DELALLOC;

	if (bits & (EXTENT_IOBITS | EXTENT_BOUNDARY))
		clear = 1;
again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	spin_lock(&tree->lock);
	if (cached_state) {
		cached = *cached_state;

		if (clear) {
			*cached_state = NULL;
			cached_state = NULL;
		}

		if (cached && cached->tree && cached->start == start) {
			if (clear)
				atomic_dec(&cached->refs);
			state = cached;
			goto hit_next;
		}
		if (clear)
			free_extent_state(cached);
	}
	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	if (state->start > end)
		goto out;
	WARN_ON(state->end < start);
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 *  | state | or
	 *  | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip
	 * bits on second half.
	 *
	 * If the extent we found extends past our range, we
	 * just split and search again.  It'll get split again
	 * the next time though.
	 *
	 * If the extent we found is inside our range, we clear
	 * the desired bit on it.
	 */

	if (state->start < start) {
		if (!prealloc)
			prealloc = alloc_extent_state(GFP_ATOMIC);
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set |= clear_state_bit(tree, state, &bits, wake);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and clear the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		if (!prealloc)
			prealloc = alloc_extent_state(GFP_ATOMIC);
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);
		if (wake)
			wake_up(&state->wq);

		set |= clear_state_bit(tree, prealloc, &bits, wake);

		prealloc = NULL;
		goto out;
	}

	if (state->end < end && prealloc && !need_resched())
		next_node = rb_next(&state->rb_node);
	else
		next_node = NULL;

	set |= clear_state_bit(tree, state, &bits, wake);
	if (last_end == (u64)-1)
		goto out;
	start = last_end + 1;
	if (start <= end && next_node) {
		state = rb_entry(next_node, struct extent_state,
				 rb_node);
		if (state->start == start)
			goto hit_next;
	}
	goto search_again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return set;

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
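
/*
 * Minimal caller sketch for clear_extent_bit() (hypothetical, not a
 * caller that exists in this file): clearing EXTENT_DIRTY on the first
 * 16k and testing whether anything was actually cleared:
 *
 *	set = clear_extent_bit(tree, 0, 16383, EXTENT_DIRTY,
 *			       0, 0, NULL, GFP_NOFS);
 *	if (set > 0)
 *		(at least one of the requested bits was present)
 *
 * wake == 1 additionally wakes sleepers in wait_on_state(), and
 * delete == 1 widens the mask so the whole range is dropped from the
 * tree regardless of which bits it carried.
 */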

static int wait_on_state(struct extent_io_tree *tree,
			 struct extent_state *state)
		__releases(tree->lock)
		__acquires(tree->lock)
{
	DEFINE_WAIT(wait);
	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
	spin_unlock(&tree->lock);
	schedule();
	spin_lock(&tree->lock);
	finish_wait(&state->wq, &wait);
	return 0;
}

/*
 * waits for one or more bits to clear on a range in the state tree.
 * The range [start, end] is inclusive.
 * The tree lock is taken by this function
 */
int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
{
	struct extent_state *state;
	struct rb_node *node;

	spin_lock(&tree->lock);
again:
	while (1) {
		/*
		 * this search will find all the extents that end after
		 * our range starts
		 */
		node = tree_search(tree, start);
		if (!node)
			break;

		state = rb_entry(node, struct extent_state, rb_node);

		if (state->start > end)
			goto out;

		if (state->state & bits) {
			start = state->start;
			atomic_inc(&state->refs);
			wait_on_state(tree, state);
			free_extent_state(state);
			goto again;
		}
		start = state->end + 1;

		if (start > end)
			break;

		if (need_resched()) {
			spin_unlock(&tree->lock);
			cond_resched();
			spin_lock(&tree->lock);
		}
	}
out:
	spin_unlock(&tree->lock);
	return 0;
}

static int set_state_bits(struct extent_io_tree *tree,
			  struct extent_state *state,
			  int *bits)
{
	int ret;
	int bits_to_set = *bits & ~EXTENT_CTLBITS;

	ret = set_state_cb(tree, state, bits);
	if (ret)
		return ret;
	if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		tree->dirty_bytes += range;
	}
	state->state |= bits_to_set;

	return 0;
}

static void cache_state(struct extent_state *state,
			struct extent_state **cached_ptr)
{
	if (cached_ptr && !(*cached_ptr)) {
		if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY)) {
			*cached_ptr = state;
			atomic_inc(&state->refs);
		}
	}
}
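
/*
 * The cached_state plumbing in one hedged sketch (hypothetical caller):
 *
 *	struct extent_state *cached = NULL;
 *
 *	lock_extent_bits(tree, start, end, 0, &cached, GFP_NOFS);
 *	(... work on the locked range ...)
 *	unlock_extent_cached(tree, start, end, &cached, GFP_NOFS);
 *
 * cache_state() only latches EXTENT_IOBITS / EXTENT_BOUNDARY states and
 * takes an extra reference, which lets the unlock side skip the rb-tree
 * walk when the same state is still in place.
 */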

/*
 * set some bits on a range in the tree.  This may require allocations or
 * sleeping, so the gfp mask is used to indicate what is allowed.
 *
 * If any of the exclusive bits are set, this will fail with -EEXIST if some
 * part of the range already has the desired bits set.  The start of the
 * existing range is returned in failed_start in this case.
 *
 * [start, end] is inclusive.  This takes the tree lock.
 */

static int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
			  int bits, int exclusive_bits, u64 *failed_start,
			  struct extent_state **cached_state,
			  gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	int err = 0;
	u64 last_start;
	u64 last_end;

	bits |= EXTENT_FIRST_DELALLOC;
again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	spin_lock(&tree->lock);
	if (cached_state && *cached_state) {
		state = *cached_state;
		if (state->start == start && state->tree) {
			node = &state->rb_node;
			goto hit_next;
		}
	}
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node) {
		err = insert_state(tree, prealloc, start, end, &bits);
		prealloc = NULL;
		BUG_ON(err == -EEXIST);
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		struct rb_node *next_node;
		if (state->state & exclusive_bits) {
			*failed_start = state->start;
			err = -EEXIST;
			goto out;
		}

		err = set_state_bits(tree, state, &bits);
		if (err)
			goto out;

		cache_state(state, cached_state);
		merge_state(tree, state);
		if (last_end == (u64)-1)
			goto out;

		start = last_end + 1;
		if (start < end && prealloc && !need_resched()) {
			next_node = rb_next(node);
			if (next_node) {
				state = rb_entry(next_node, struct extent_state,
						 rb_node);
				if (state->start == start)
					goto hit_next;
			}
		}
		goto search_again;
	}

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		if (state->state & exclusive_bits) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			err = set_state_bits(tree, state, &bits);
			if (err)
				goto out;
			cache_state(state, cached_state);
			merge_state(tree, state);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or               | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;
		err = insert_state(tree, prealloc, start, this_end,
				   &bits);
		BUG_ON(err == -EEXIST);
		if (err) {
			prealloc = NULL;
			goto out;
		}
		cache_state(prealloc, cached_state);
		prealloc = NULL;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		if (state->state & exclusive_bits) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		err = set_state_bits(tree, prealloc, &bits);
		if (err) {
			prealloc = NULL;
			goto out;
		}
		cache_state(prealloc, cached_state);
		merge_state(tree, prealloc);
		prealloc = NULL;
		goto out;
	}

	goto search_again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return err;

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
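
/*
 * Behaviour sketch for the exclusive_bits path above, with made-up
 * offsets: if [0, 4095] is already EXTENT_LOCKED, then
 *
 *	err = set_extent_bit(tree, 0, 8191, EXTENT_LOCKED, EXTENT_LOCKED,
 *			     &failed_start, NULL, GFP_NOFS);
 *
 * fails with -EEXIST and failed_start == 0, telling the caller exactly
 * where the conflict begins so it can wait for that range and retry
 * (which is what lock_extent_bits() below does).  With
 * exclusive_bits == 0 the same call simply ORs the bits into the range.
 */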

/* wrappers around set/clear extent bit */
int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
			      NULL, mask);
}

int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		    int bits, gfp_t mask)
{
	return set_extent_bit(tree, start, end, bits, 0, NULL,
			      NULL, mask);
}

int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		      int bits, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask);
}

int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
			struct extent_state **cached_state, gfp_t mask)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_DIRTY | EXTENT_UPTODATE,
			      0, NULL, cached_state, mask);
}

int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
		       gfp_t mask)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC |
				EXTENT_DO_ACCOUNTING, 0, 0, NULL, mask);
}

int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
		   gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
			      NULL, mask);
}

static int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
			    gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0,
				NULL, mask);
}

int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
			gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
			      NULL, mask);
}

static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
				 u64 end, struct extent_state **cached_state,
				 gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
				cached_state, mask);
}

int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
{
	return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
}

/*
 * either insert or lock state struct between start and end.  Use mask to
 * tell us if waiting is desired.
 */
int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		     int bits, struct extent_state **cached_state, gfp_t mask)
{
	int err;
	u64 failed_start;
	while (1) {
		err = set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
				     EXTENT_LOCKED, &failed_start,
				     cached_state, mask);
		if (err == -EEXIST && (mask & __GFP_WAIT)) {
			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
			start = failed_start;
		} else {
			break;
		}
		WARN_ON(start > end);
	}
	return err;
}
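
/*
 * Typical pairing, as a minimal sketch (hypothetical caller):
 *
 *	lock_extent(tree, start, end, GFP_NOFS);
 *	(... read or modify the range ...)
 *	unlock_extent(tree, start, end, GFP_NOFS);
 *
 * With a __GFP_WAIT mask the loop above never returns -EEXIST to the
 * caller: each conflict reports its offset in failed_start, the thread
 * sleeps in wait_extent_bit() until EXTENT_LOCKED clears there, and the
 * lock attempt restarts from that offset.
 */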

int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
{
	return lock_extent_bits(tree, start, end, 0, NULL, mask);
}

int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
		    gfp_t mask)
{
	int err;
	u64 failed_start;

	err = set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
			     &failed_start, NULL, mask);
	if (err == -EEXIST) {
		if (failed_start > start)
			clear_extent_bit(tree, start, failed_start - 1,
					 EXTENT_LOCKED, 1, 0, NULL, mask);
		return 0;
	}
	return 1;
}
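
/*
 * try_lock_extent() is the non-blocking variant: it returns 1 on
 * success and 0 when someone else already holds part of the range,
 * unwinding whatever prefix it managed to lock.  Hedged sketch of the
 * trylock pattern (hypothetical caller):
 *
 *	if (!try_lock_extent(tree, start, end, GFP_NOFS))
 *		(back off, or fall back to lock_extent())
 */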

int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
			 struct extent_state **cached, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
				mask);
}

int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
		  gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
				mask);
}

/*
 * helper function to set pages and extents in the tree dirty
 */
int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		__set_page_dirty_nobuffers(page);
		page_cache_release(page);
		index++;
	}
	return 0;
}

/*
 * helper function to set both pages and extents in the tree writeback
 */
static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		set_page_writeback(page);
		page_cache_release(page);
		index++;
	}
	return 0;
}

/*
 * find the first offset in the io tree with 'bits' set.  zero is
 * returned if we find something, and *start_ret and *end_ret are
 * set to reflect the state struct that was found.
 *
 * If nothing was found, 1 is returned, < 0 on error
 */
int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, int bits)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 1;

	spin_lock(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->end >= start && (state->state & bits)) {
			*start_ret = state->start;
			*end_ret = state->end;
			ret = 0;
			break;
		}
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	spin_unlock(&tree->lock);
	return ret;
}
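
/*
 * Iteration sketch built on find_first_extent_bit() (hypothetical
 * caller walking every EXTENT_DIRTY range starting at 'cur'):
 *
 *	while (!find_first_extent_bit(tree, cur, &found_start,
 *				      &found_end, EXTENT_DIRTY)) {
 *		(... process [found_start, found_end] ...)
 *		cur = found_end + 1;
 *	}
 *
 * The 0 return means a state was found; 1 means the walk ran off the
 * end of the tree.
 */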

/* find the first state struct with 'bits' set after 'start', and
 * return it.  tree->lock must be held.  NULL will be returned if
 * nothing was found after 'start'
 */
struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
						 u64 start, int bits)
{
	struct rb_node *node;
	struct extent_state *state;

	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->end >= start && (state->state & bits))
			return state;

		node = rb_next(node);
		if (!node)
			break;
	}
out:
	return NULL;
}

/*
 * find a contiguous range of bytes in the file marked as delalloc, not
 * more than 'max_bytes'.  start and end are used to return the range.
 *
 * 1 is returned if we find something, 0 if nothing was in the tree
 */
static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
					u64 *start, u64 *end, u64 max_bytes,
					struct extent_state **cached_state)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = *start;
	u64 found = 0;
	u64 total_bytes = 0;

	spin_lock(&tree->lock);

	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, cur_start);
	if (!node) {
		if (!found)
			*end = (u64)-1;
		goto out;
	}

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (found && (state->start != cur_start ||
			      (state->state & EXTENT_BOUNDARY))) {
			goto out;
		}
		if (!(state->state & EXTENT_DELALLOC)) {
			if (!found)
				*end = state->end;
			goto out;
		}
		if (!found) {
			*start = state->start;
			*cached_state = state;
			atomic_inc(&state->refs);
		}
		found++;
		*end = state->end;
		cur_start = state->end + 1;
		node = rb_next(node);
		if (!node)
			break;
		total_bytes += state->end - state->start + 1;
		if (total_bytes >= max_bytes)
			break;
	}
out:
	spin_unlock(&tree->lock);
	return found;
}
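
/*
 * Behaviour notes on find_delalloc_range(), as a hedged example with
 * made-up numbers: with one delalloc state at [4096, 65535] and
 * *start == 8192, the search lands inside that state and returns with
 * *start == 4096 and *end == 65535; the reported range is the whole
 * state, which may begin before the offset passed in.  The walk ends at
 * the first gap, at any EXTENT_BOUNDARY state, or once at least
 * max_bytes worth of extents have been accumulated, and the first state
 * is pinned in *cached_state with a reference the caller must drop via
 * free_extent_state().
 */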
1235
Chris Masonc8b97812008-10-29 14:49:59 -04001236static noinline int __unlock_for_delalloc(struct inode *inode,
1237 struct page *locked_page,
1238 u64 start, u64 end)
1239{
1240 int ret;
1241 struct page *pages[16];
1242 unsigned long index = start >> PAGE_CACHE_SHIFT;
1243 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1244 unsigned long nr_pages = end_index - index + 1;
1245 int i;
1246
1247 if (index == locked_page->index && end_index == index)
1248 return 0;
1249
Chris Masond3977122009-01-05 21:25:51 -05001250 while (nr_pages > 0) {
Chris Masonc8b97812008-10-29 14:49:59 -04001251 ret = find_get_pages_contig(inode->i_mapping, index,
Chris Mason5b050f02008-11-11 09:34:41 -05001252 min_t(unsigned long, nr_pages,
1253 ARRAY_SIZE(pages)), pages);
Chris Masonc8b97812008-10-29 14:49:59 -04001254 for (i = 0; i < ret; i++) {
1255 if (pages[i] != locked_page)
1256 unlock_page(pages[i]);
1257 page_cache_release(pages[i]);
1258 }
1259 nr_pages -= ret;
1260 index += ret;
1261 cond_resched();
1262 }
1263 return 0;
1264}
1265
1266static noinline int lock_delalloc_pages(struct inode *inode,
1267 struct page *locked_page,
1268 u64 delalloc_start,
1269 u64 delalloc_end)
1270{
1271 unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT;
1272 unsigned long start_index = index;
1273 unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT;
1274 unsigned long pages_locked = 0;
1275 struct page *pages[16];
1276 unsigned long nrpages;
1277 int ret;
1278 int i;
1279
1280 /* the caller is responsible for locking the start index */
1281 if (index == locked_page->index && index == end_index)
1282 return 0;
1283
1284 /* skip the page at the start index */
1285 nrpages = end_index - index + 1;
Chris Masond3977122009-01-05 21:25:51 -05001286 while (nrpages > 0) {
Chris Masonc8b97812008-10-29 14:49:59 -04001287 ret = find_get_pages_contig(inode->i_mapping, index,
Chris Mason5b050f02008-11-11 09:34:41 -05001288 min_t(unsigned long,
1289 nrpages, ARRAY_SIZE(pages)), pages);
Chris Masonc8b97812008-10-29 14:49:59 -04001290 if (ret == 0) {
1291 ret = -EAGAIN;
1292 goto done;
1293 }
1294 /* now we have an array of pages, lock them all */
1295 for (i = 0; i < ret; i++) {
1296 /*
1297 * the caller is taking responsibility for
1298 * locked_page
1299 */
Chris Mason771ed682008-11-06 22:02:51 -05001300 if (pages[i] != locked_page) {
Chris Masonc8b97812008-10-29 14:49:59 -04001301 lock_page(pages[i]);
Chris Masonf2b1c412008-11-10 07:31:30 -05001302 if (!PageDirty(pages[i]) ||
1303 pages[i]->mapping != inode->i_mapping) {
Chris Mason771ed682008-11-06 22:02:51 -05001304 ret = -EAGAIN;
1305 unlock_page(pages[i]);
1306 page_cache_release(pages[i]);
1307 goto done;
1308 }
1309 }
Chris Masonc8b97812008-10-29 14:49:59 -04001310 page_cache_release(pages[i]);
Chris Mason771ed682008-11-06 22:02:51 -05001311 pages_locked++;
Chris Masonc8b97812008-10-29 14:49:59 -04001312 }
Chris Masonc8b97812008-10-29 14:49:59 -04001313 nrpages -= ret;
1314 index += ret;
1315 cond_resched();
1316 }
1317 ret = 0;
1318done:
1319 if (ret && pages_locked) {
1320 __unlock_for_delalloc(inode, locked_page,
1321 delalloc_start,
1322 ((u64)(start_index + pages_locked - 1)) <<
1323 PAGE_CACHE_SHIFT);
1324 }
1325 return ret;
1326}
1327
1328/*
1329 * find a contiguous range of bytes in the file marked as delalloc, not
1330 * more than 'max_bytes'. start and end are used to return the range,
1331 *
1332 * 1 is returned if we find something, 0 if nothing was in the tree
1333 */
1334static noinline u64 find_lock_delalloc_range(struct inode *inode,
1335 struct extent_io_tree *tree,
1336 struct page *locked_page,
1337 u64 *start, u64 *end,
1338 u64 max_bytes)
1339{
1340 u64 delalloc_start;
1341 u64 delalloc_end;
1342 u64 found;
Chris Mason9655d292009-09-02 15:22:30 -04001343 struct extent_state *cached_state = NULL;
Chris Masonc8b97812008-10-29 14:49:59 -04001344 int ret;
1345 int loops = 0;
1346
1347again:
1348 /* step one, find a bunch of delalloc bytes starting at start */
1349 delalloc_start = *start;
1350 delalloc_end = 0;
1351 found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
Josef Bacikc2a128d2010-02-02 21:19:11 +00001352 max_bytes, &cached_state);
Chris Mason70b99e62008-10-31 12:46:39 -04001353 if (!found || delalloc_end <= *start) {
Chris Masonc8b97812008-10-29 14:49:59 -04001354 *start = delalloc_start;
1355 *end = delalloc_end;
Josef Bacikc2a128d2010-02-02 21:19:11 +00001356 free_extent_state(cached_state);
Chris Masonc8b97812008-10-29 14:49:59 -04001357 return found;
1358 }
1359
1360 /*
Chris Mason70b99e62008-10-31 12:46:39 -04001361 * start comes from the offset of locked_page. We have to lock
1362 * pages in order, so we can't process delalloc bytes before
1363 * locked_page
1364 */
Chris Masond3977122009-01-05 21:25:51 -05001365 if (delalloc_start < *start)
Chris Mason70b99e62008-10-31 12:46:39 -04001366 delalloc_start = *start;
Chris Mason70b99e62008-10-31 12:46:39 -04001367
1368 /*
Chris Masonc8b97812008-10-29 14:49:59 -04001369 * make sure to limit the number of pages we try to lock down
1370 * if we're looping.
1371 */
Chris Masond3977122009-01-05 21:25:51 -05001372 if (delalloc_end + 1 - delalloc_start > max_bytes && loops)
Chris Mason771ed682008-11-06 22:02:51 -05001373 delalloc_end = delalloc_start + PAGE_CACHE_SIZE - 1;
Chris Masond3977122009-01-05 21:25:51 -05001374
Chris Masonc8b97812008-10-29 14:49:59 -04001375 /* step two, lock all the pages after the page that has start */
1376 ret = lock_delalloc_pages(inode, locked_page,
1377 delalloc_start, delalloc_end);
1378 if (ret == -EAGAIN) {
1379 /* some of the pages are gone, lets avoid looping by
1380 * shortening the size of the delalloc range we're searching
1381 */
Chris Mason9655d292009-09-02 15:22:30 -04001382 free_extent_state(cached_state);
Chris Masonc8b97812008-10-29 14:49:59 -04001383 if (!loops) {
1384 unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1);
1385 max_bytes = PAGE_CACHE_SIZE - offset;
1386 loops = 1;
1387 goto again;
1388 } else {
1389 found = 0;
1390 goto out_failed;
1391 }
1392 }
1393 BUG_ON(ret);
1394
1395 /* step three, lock the state bits for the whole range */
Chris Mason9655d292009-09-02 15:22:30 -04001396 lock_extent_bits(tree, delalloc_start, delalloc_end,
1397 0, &cached_state, GFP_NOFS);
Chris Masonc8b97812008-10-29 14:49:59 -04001398
1399 /* then test to make sure it is all still delalloc */
1400 ret = test_range_bit(tree, delalloc_start, delalloc_end,
Chris Mason9655d292009-09-02 15:22:30 -04001401 EXTENT_DELALLOC, 1, cached_state);
Chris Masonc8b97812008-10-29 14:49:59 -04001402 if (!ret) {
Chris Mason9655d292009-09-02 15:22:30 -04001403 unlock_extent_cached(tree, delalloc_start, delalloc_end,
1404 &cached_state, GFP_NOFS);
Chris Masonc8b97812008-10-29 14:49:59 -04001405 __unlock_for_delalloc(inode, locked_page,
1406 delalloc_start, delalloc_end);
1407 cond_resched();
1408 goto again;
1409 }
Chris Mason9655d292009-09-02 15:22:30 -04001410 free_extent_state(cached_state);
Chris Masonc8b97812008-10-29 14:49:59 -04001411 *start = delalloc_start;
1412 *end = delalloc_end;
1413out_failed:
1414 return found;
1415}
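
/*
 * Illustrative sketch, not part of the original file: the loop shape a
 * writepage-style caller uses to drive find_lock_delalloc_range() across
 * a page.  example_walk_delalloc() is a hypothetical name; the real
 * caller is the delalloc loop in __extent_writepage() below.
 */
static inline void example_walk_delalloc(struct inode *inode,
					 struct extent_io_tree *tree,
					 struct page *locked_page,
					 u64 page_start, u64 page_end)
{
	u64 delalloc_start = page_start;
	u64 delalloc_end = 0;
	u64 found;

	while (delalloc_end < page_end) {
		/* on success the pages and extent bits come back locked */
		found = find_lock_delalloc_range(inode, tree, locked_page,
						 &delalloc_start,
						 &delalloc_end,
						 128 * 1024 * 1024);
		if (found == 0) {
			/* no delalloc here, skip past the clean gap */
			delalloc_start = delalloc_end + 1;
			continue;
		}
		/* ... start IO on [delalloc_start, delalloc_end] ... */
		delalloc_start = delalloc_end + 1;
	}
}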
1416
1417int extent_clear_unlock_delalloc(struct inode *inode,
1418 struct extent_io_tree *tree,
1419 u64 start, u64 end, struct page *locked_page,
Chris Masona791e352009-10-08 11:27:10 -04001420 unsigned long op)
Chris Masonc8b97812008-10-29 14:49:59 -04001421{
1422 int ret;
1423 struct page *pages[16];
1424 unsigned long index = start >> PAGE_CACHE_SHIFT;
1425 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1426 unsigned long nr_pages = end_index - index + 1;
1427 int i;
Chris Mason771ed682008-11-06 22:02:51 -05001428 int clear_bits = 0;
Chris Masonc8b97812008-10-29 14:49:59 -04001429
Chris Masona791e352009-10-08 11:27:10 -04001430 if (op & EXTENT_CLEAR_UNLOCK)
Chris Mason771ed682008-11-06 22:02:51 -05001431 clear_bits |= EXTENT_LOCKED;
Chris Masona791e352009-10-08 11:27:10 -04001432 if (op & EXTENT_CLEAR_DIRTY)
Chris Masonc8b97812008-10-29 14:49:59 -04001433 clear_bits |= EXTENT_DIRTY;
1434
Chris Masona791e352009-10-08 11:27:10 -04001435 if (op & EXTENT_CLEAR_DELALLOC)
Chris Mason771ed682008-11-06 22:02:51 -05001436 clear_bits |= EXTENT_DELALLOC;
1437
Chris Mason2c64c532009-09-02 15:04:12 -04001438 clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS);
Josef Bacik32c00af2009-10-08 13:34:05 -04001439 if (!(op & (EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
1440 EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK |
1441 EXTENT_SET_PRIVATE2)))
Chris Mason771ed682008-11-06 22:02:51 -05001442 return 0;
Chris Masonc8b97812008-10-29 14:49:59 -04001443
Chris Masond3977122009-01-05 21:25:51 -05001444 while (nr_pages > 0) {
Chris Masonc8b97812008-10-29 14:49:59 -04001445 ret = find_get_pages_contig(inode->i_mapping, index,
Chris Mason5b050f02008-11-11 09:34:41 -05001446 min_t(unsigned long,
1447 nr_pages, ARRAY_SIZE(pages)), pages);
Chris Masonc8b97812008-10-29 14:49:59 -04001448 for (i = 0; i < ret; i++) {
Chris Mason8b62b722009-09-02 16:53:46 -04001449
Chris Masona791e352009-10-08 11:27:10 -04001450 if (op & EXTENT_SET_PRIVATE2)
Chris Mason8b62b722009-09-02 16:53:46 -04001451 SetPagePrivate2(pages[i]);
1452
Chris Masonc8b97812008-10-29 14:49:59 -04001453 if (pages[i] == locked_page) {
1454 page_cache_release(pages[i]);
1455 continue;
1456 }
Chris Masona791e352009-10-08 11:27:10 -04001457 if (op & EXTENT_CLEAR_DIRTY)
Chris Masonc8b97812008-10-29 14:49:59 -04001458 clear_page_dirty_for_io(pages[i]);
Chris Masona791e352009-10-08 11:27:10 -04001459 if (op & EXTENT_SET_WRITEBACK)
Chris Masonc8b97812008-10-29 14:49:59 -04001460 set_page_writeback(pages[i]);
Chris Masona791e352009-10-08 11:27:10 -04001461 if (op & EXTENT_END_WRITEBACK)
Chris Masonc8b97812008-10-29 14:49:59 -04001462 end_page_writeback(pages[i]);
Chris Masona791e352009-10-08 11:27:10 -04001463 if (op & EXTENT_CLEAR_UNLOCK_PAGE)
Chris Mason771ed682008-11-06 22:02:51 -05001464 unlock_page(pages[i]);
Chris Masonc8b97812008-10-29 14:49:59 -04001465 page_cache_release(pages[i]);
1466 }
1467 nr_pages -= ret;
1468 index += ret;
1469 cond_resched();
1470 }
1471 return 0;
1472}
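
/*
 * Illustrative sketch, not part of the original file: one plausible way a
 * caller hands a delalloc range back after starting IO on it.  The op
 * mask is just an example combination of the EXTENT_CLEAR_ and
 * EXTENT_SET_ flags handled above; example_finish_delalloc() is made up.
 */
static inline void example_finish_delalloc(struct inode *inode,
					   struct extent_io_tree *tree,
					   u64 start, u64 end,
					   struct page *locked_page)
{
	/* clear the lock/dirty/delalloc bits, mark writeback, unlock pages */
	extent_clear_unlock_delalloc(inode, tree, start, end, locked_page,
				     EXTENT_CLEAR_UNLOCK_PAGE |
				     EXTENT_CLEAR_UNLOCK |
				     EXTENT_CLEAR_DELALLOC |
				     EXTENT_CLEAR_DIRTY |
				     EXTENT_SET_WRITEBACK);
}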
Chris Masonc8b97812008-10-29 14:49:59 -04001473
Chris Masond352ac62008-09-29 15:18:18 -04001474/*
1475 * count the number of bytes in the tree that have a given bit(s)
1476 * set. This can be fairly slow, except for EXTENT_DIRTY which is
1477 * cached. The total number found is returned.
1478 */
Chris Masond1310b22008-01-24 16:13:08 -05001479u64 count_range_bits(struct extent_io_tree *tree,
1480 u64 *start, u64 search_end, u64 max_bytes,
1481 unsigned long bits)
1482{
1483 struct rb_node *node;
1484 struct extent_state *state;
1485 u64 cur_start = *start;
1486 u64 total_bytes = 0;
1487 int found = 0;
1488
1489 if (search_end <= cur_start) {
Chris Masond1310b22008-01-24 16:13:08 -05001490 WARN_ON(1);
1491 return 0;
1492 }
1493
Chris Masoncad321a2008-12-17 14:51:42 -05001494 spin_lock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001495 if (cur_start == 0 && bits == EXTENT_DIRTY) {
1496 total_bytes = tree->dirty_bytes;
1497 goto out;
1498 }
1499 /*
1500 * this search will find all the extents that end after
1501 * our range starts.
1502 */
Chris Mason80ea96b2008-02-01 14:51:59 -05001503 node = tree_search(tree, cur_start);
Chris Masond3977122009-01-05 21:25:51 -05001504 if (!node)
Chris Masond1310b22008-01-24 16:13:08 -05001505 goto out;
Chris Masond1310b22008-01-24 16:13:08 -05001506
Chris Masond3977122009-01-05 21:25:51 -05001507 while (1) {
Chris Masond1310b22008-01-24 16:13:08 -05001508 state = rb_entry(node, struct extent_state, rb_node);
1509 if (state->start > search_end)
1510 break;
1511 if (state->end >= cur_start && (state->state & bits)) {
1512 total_bytes += min(search_end, state->end) + 1 -
1513 max(cur_start, state->start);
1514 if (total_bytes >= max_bytes)
1515 break;
1516 if (!found) {
1517 *start = state->start;
1518 found = 1;
1519 }
1520 }
1521 node = rb_next(node);
1522 if (!node)
1523 break;
1524 }
1525out:
Chris Masoncad321a2008-12-17 14:51:42 -05001526 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001527 return total_bytes;
1528}
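
/*
 * Illustrative sketch, not part of the original file: counting the dirty
 * bytes at or after a given offset.  On return, 'start' has been moved
 * up to the start of the first matching extent.
 */
static inline u64 example_count_dirty(struct extent_io_tree *tree, u64 from)
{
	u64 start = from;

	/* search to the very end ((u64)-1) with no cap on the total */
	return count_range_bits(tree, &start, (u64)-1, (u64)-1,
				EXTENT_DIRTY);
}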
Christoph Hellwigb2950862008-12-02 09:54:17 -05001529
Chris Masond352ac62008-09-29 15:18:18 -04001530/*
1531 * set the private field for a given byte offset in the tree. If there isn't
1532 * an extent_state starting exactly at that offset, this returns -ENOENT.
1533 */
Chris Masond1310b22008-01-24 16:13:08 -05001534int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
1535{
1536 struct rb_node *node;
1537 struct extent_state *state;
1538 int ret = 0;
1539
Chris Masoncad321a2008-12-17 14:51:42 -05001540 spin_lock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001541 /*
1542 * this search will find all the extents that end after
1543 * our range starts.
1544 */
Chris Mason80ea96b2008-02-01 14:51:59 -05001545 node = tree_search(tree, start);
Peter2b114d12008-04-01 11:21:40 -04001546 if (!node) {
Chris Masond1310b22008-01-24 16:13:08 -05001547 ret = -ENOENT;
1548 goto out;
1549 }
1550 state = rb_entry(node, struct extent_state, rb_node);
1551 if (state->start != start) {
1552 ret = -ENOENT;
1553 goto out;
1554 }
1555 state->private = private;
1556out:
Chris Masoncad321a2008-12-17 14:51:42 -05001557 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001558 return ret;
1559}
1560
1561int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
1562{
1563 struct rb_node *node;
1564 struct extent_state *state;
1565 int ret = 0;
1566
Chris Masoncad321a2008-12-17 14:51:42 -05001567 spin_lock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001568 /*
1569 * this search will find all the extents that end after
1570 * our range starts.
1571 */
Chris Mason80ea96b2008-02-01 14:51:59 -05001572 node = tree_search(tree, start);
Peter2b114d12008-04-01 11:21:40 -04001573 if (!node) {
Chris Masond1310b22008-01-24 16:13:08 -05001574 ret = -ENOENT;
1575 goto out;
1576 }
1577 state = rb_entry(node, struct extent_state, rb_node);
1578 if (state->start != start) {
1579 ret = -ENOENT;
1580 goto out;
1581 }
1582 *private = state->private;
1583out:
Chris Masoncad321a2008-12-17 14:51:42 -05001584 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001585 return ret;
1586}
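
/*
 * Illustrative sketch, not part of the original file: the private field
 * is a per-extent scratch value keyed by the extent's exact start offset
 * (btrfs uses it to stash read checksums).  Both helpers above return
 * -ENOENT when no extent_state begins at 'start'.
 */
static inline int example_stash_private(struct extent_io_tree *tree,
					u64 start, u64 value)
{
	u64 readback = 0;
	int ret;

	ret = set_state_private(tree, start, value);
	if (ret)
		return ret;
	ret = get_state_private(tree, start, &readback);
	if (ret)
		return ret;
	return readback == value ? 0 : -EIO;
}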
1587
1588/*
1589 * searches a range in the state tree for a given mask.
Chris Mason70dec802008-01-29 09:59:12 -05001590 * If 'filled' == 1, this returns 1 only if every extent covering the range
Chris Masond1310b22008-01-24 16:13:08 -05001591 * has the bits set. Otherwise, 1 is returned if any bit in the
1592 * range is found set.
1593 */
1594int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
Chris Mason9655d292009-09-02 15:22:30 -04001595 int bits, int filled, struct extent_state *cached)
Chris Masond1310b22008-01-24 16:13:08 -05001596{
1597 struct extent_state *state = NULL;
1598 struct rb_node *node;
1599 int bitset = 0;
Chris Masond1310b22008-01-24 16:13:08 -05001600
Chris Masoncad321a2008-12-17 14:51:42 -05001601 spin_lock(&tree->lock);
Chris Mason9655d292009-09-02 15:22:30 -04001602 if (cached && cached->tree && cached->start == start)
1603 node = &cached->rb_node;
1604 else
1605 node = tree_search(tree, start);
Chris Masond1310b22008-01-24 16:13:08 -05001606 while (node && start <= end) {
1607 state = rb_entry(node, struct extent_state, rb_node);
1608
1609 if (filled && state->start > start) {
1610 bitset = 0;
1611 break;
1612 }
1613
1614 if (state->start > end)
1615 break;
1616
1617 if (state->state & bits) {
1618 bitset = 1;
1619 if (!filled)
1620 break;
1621 } else if (filled) {
1622 bitset = 0;
1623 break;
1624 }
Chris Mason46562ce2009-09-23 20:23:16 -04001625
1626 if (state->end == (u64)-1)
1627 break;
1628
Chris Masond1310b22008-01-24 16:13:08 -05001629 start = state->end + 1;
1630 if (start > end)
1631 break;
1632 node = rb_next(node);
1633 if (!node) {
1634 if (filled)
1635 bitset = 0;
1636 break;
1637 }
1638 }
Chris Masoncad321a2008-12-17 14:51:42 -05001639 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001640 return bitset;
1641}
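
/*
 * Illustrative sketch, not part of the original file: the two flavors of
 * test_range_bit().  filled == 1 asks "does every byte in the range have
 * the bit", filled == 0 asks "does any byte have it".
 */
static inline int example_range_is_settled(struct extent_io_tree *tree,
					   u64 start, u64 end)
{
	int all_uptodate = test_range_bit(tree, start, end,
					  EXTENT_UPTODATE, 1, NULL);
	int any_locked = test_range_bit(tree, start, end,
					EXTENT_LOCKED, 0, NULL);

	return all_uptodate && !any_locked;
}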
Chris Masond1310b22008-01-24 16:13:08 -05001642
1643/*
1644 * helper function to set a given page up to date if all the
1645 * extents in the tree for that page are up to date
1646 */
1647static int check_page_uptodate(struct extent_io_tree *tree,
1648 struct page *page)
1649{
1650 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1651 u64 end = start + PAGE_CACHE_SIZE - 1;
Chris Mason9655d292009-09-02 15:22:30 -04001652 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
Chris Masond1310b22008-01-24 16:13:08 -05001653 SetPageUptodate(page);
1654 return 0;
1655}
1656
1657/*
1658 * helper function to unlock a page if all the extents in the tree
1659 * for that page are unlocked
1660 */
1661static int check_page_locked(struct extent_io_tree *tree,
1662 struct page *page)
1663{
1664 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1665 u64 end = start + PAGE_CACHE_SIZE - 1;
Chris Mason9655d292009-09-02 15:22:30 -04001666 if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL))
Chris Masond1310b22008-01-24 16:13:08 -05001667 unlock_page(page);
1668 return 0;
1669}
1670
1671/*
1672 * helper function to end page writeback; writeback is no longer
1673 * tracked per extent range, so this ends it for the whole page
1674 */
1675static int check_page_writeback(struct extent_io_tree *tree,
1676 struct page *page)
1677{
Chris Mason1edbb732009-09-02 13:24:36 -04001678 end_page_writeback(page);
Chris Masond1310b22008-01-24 16:13:08 -05001679 return 0;
1680}
1681
1682/* lots and lots of room for performance fixes in the end_bio funcs */
1683
1684/*
1685 * after a writepage IO is done, we need to:
1686 * clear the uptodate bits on error
1687 * clear the writeback bits in the extent tree for this IO
1688 * end_page_writeback if the page has no more pending IO
1689 *
1690 * Scheduling is not allowed, so the extent state tree is expected
1691 * to have one and only one object corresponding to this IO.
1692 */
Chris Masond1310b22008-01-24 16:13:08 -05001693static void end_bio_extent_writepage(struct bio *bio, int err)
Chris Masond1310b22008-01-24 16:13:08 -05001694{
Chris Mason1259ab72008-05-12 13:39:03 -04001695 int uptodate = err == 0;
Chris Masond1310b22008-01-24 16:13:08 -05001696 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
David Woodhouse902b22f2008-08-20 08:51:49 -04001697 struct extent_io_tree *tree;
Chris Masond1310b22008-01-24 16:13:08 -05001698 u64 start;
1699 u64 end;
1700 int whole_page;
Chris Mason1259ab72008-05-12 13:39:03 -04001701 int ret;
Chris Masond1310b22008-01-24 16:13:08 -05001702
Chris Masond1310b22008-01-24 16:13:08 -05001703 do {
1704 struct page *page = bvec->bv_page;
David Woodhouse902b22f2008-08-20 08:51:49 -04001705 tree = &BTRFS_I(page->mapping->host)->io_tree;
1706
Chris Masond1310b22008-01-24 16:13:08 -05001707 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1708 bvec->bv_offset;
1709 end = start + bvec->bv_len - 1;
1710
1711 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1712 whole_page = 1;
1713 else
1714 whole_page = 0;
1715
1716 if (--bvec >= bio->bi_io_vec)
1717 prefetchw(&bvec->bv_page->flags);
Chris Mason1259ab72008-05-12 13:39:03 -04001718 if (tree->ops && tree->ops->writepage_end_io_hook) {
1719 ret = tree->ops->writepage_end_io_hook(page, start,
David Woodhouse902b22f2008-08-20 08:51:49 -04001720 end, NULL, uptodate);
Chris Mason1259ab72008-05-12 13:39:03 -04001721 if (ret)
1722 uptodate = 0;
1723 }
1724
1725 if (!uptodate && tree->ops &&
1726 tree->ops->writepage_io_failed_hook) {
1727 ret = tree->ops->writepage_io_failed_hook(bio, page,
David Woodhouse902b22f2008-08-20 08:51:49 -04001728 start, end, NULL);
Chris Mason1259ab72008-05-12 13:39:03 -04001729 if (ret == 0) {
Chris Mason1259ab72008-05-12 13:39:03 -04001730 uptodate = (err == 0);
1731 continue;
1732 }
1733 }
1734
Chris Masond1310b22008-01-24 16:13:08 -05001735 if (!uptodate) {
Josef Bacik2ac55d42010-02-03 19:33:23 +00001736 clear_extent_uptodate(tree, start, end, NULL, GFP_NOFS);
Chris Masond1310b22008-01-24 16:13:08 -05001737 ClearPageUptodate(page);
1738 SetPageError(page);
1739 }
Chris Mason70dec802008-01-29 09:59:12 -05001740
Chris Masond1310b22008-01-24 16:13:08 -05001741 if (whole_page)
1742 end_page_writeback(page);
1743 else
1744 check_page_writeback(tree, page);
Chris Masond1310b22008-01-24 16:13:08 -05001745 } while (bvec >= bio->bi_io_vec);
Chris Mason2b1f55b2008-09-24 11:48:04 -04001746
Chris Masond1310b22008-01-24 16:13:08 -05001747 bio_put(bio);
Chris Masond1310b22008-01-24 16:13:08 -05001748}
1749
1750/*
1751 * after a readpage IO is done, we need to:
1752 * clear the uptodate bits on error
1753 * set the uptodate bits if things worked
1754 * set the page up to date if all extents in the tree are uptodate
1755 * clear the lock bit in the extent tree
1756 * unlock the page if there are no other extents locked for it
1757 *
1758 * Scheduling is not allowed, so the extent state tree is expected
1759 * to have one and only one object corresponding to this IO.
1760 */
Chris Masond1310b22008-01-24 16:13:08 -05001761static void end_bio_extent_readpage(struct bio *bio, int err)
Chris Masond1310b22008-01-24 16:13:08 -05001762{
1763 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
Chris Mason4125bf72010-02-03 18:18:45 +00001764 struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
1765 struct bio_vec *bvec = bio->bi_io_vec;
David Woodhouse902b22f2008-08-20 08:51:49 -04001766 struct extent_io_tree *tree;
Chris Masond1310b22008-01-24 16:13:08 -05001767 u64 start;
1768 u64 end;
1769 int whole_page;
1770 int ret;
1771
Chris Masond20f7042008-12-08 16:58:54 -05001772 if (err)
1773 uptodate = 0;
1774
Chris Masond1310b22008-01-24 16:13:08 -05001775 do {
1776 struct page *page = bvec->bv_page;
David Woodhouse902b22f2008-08-20 08:51:49 -04001777 tree = &BTRFS_I(page->mapping->host)->io_tree;
1778
Chris Masond1310b22008-01-24 16:13:08 -05001779 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1780 bvec->bv_offset;
1781 end = start + bvec->bv_len - 1;
1782
1783 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1784 whole_page = 1;
1785 else
1786 whole_page = 0;
1787
Chris Mason4125bf72010-02-03 18:18:45 +00001788 if (++bvec <= bvec_end)
Chris Masond1310b22008-01-24 16:13:08 -05001789 prefetchw(&bvec->bv_page->flags);
1790
1791 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
Chris Mason70dec802008-01-29 09:59:12 -05001792 ret = tree->ops->readpage_end_io_hook(page, start, end,
David Woodhouse902b22f2008-08-20 08:51:49 -04001793 NULL);
Chris Masond1310b22008-01-24 16:13:08 -05001794 if (ret)
1795 uptodate = 0;
1796 }
Chris Mason7e383262008-04-09 16:28:12 -04001797 if (!uptodate && tree->ops &&
1798 tree->ops->readpage_io_failed_hook) {
1799 ret = tree->ops->readpage_io_failed_hook(bio, page,
David Woodhouse902b22f2008-08-20 08:51:49 -04001800 start, end, NULL);
Chris Mason7e383262008-04-09 16:28:12 -04001801 if (ret == 0) {
Chris Mason3b951512008-04-17 11:29:12 -04001802 uptodate =
1803 test_bit(BIO_UPTODATE, &bio->bi_flags);
Chris Masond20f7042008-12-08 16:58:54 -05001804 if (err)
1805 uptodate = 0;
Chris Mason7e383262008-04-09 16:28:12 -04001806 continue;
1807 }
1808 }
Chris Mason70dec802008-01-29 09:59:12 -05001809
Chris Mason771ed682008-11-06 22:02:51 -05001810 if (uptodate) {
David Woodhouse902b22f2008-08-20 08:51:49 -04001811 set_extent_uptodate(tree, start, end,
1812 GFP_ATOMIC);
Chris Mason771ed682008-11-06 22:02:51 -05001813 }
David Woodhouse902b22f2008-08-20 08:51:49 -04001814 unlock_extent(tree, start, end, GFP_ATOMIC);
Chris Masond1310b22008-01-24 16:13:08 -05001815
Chris Mason70dec802008-01-29 09:59:12 -05001816 if (whole_page) {
1817 if (uptodate) {
1818 SetPageUptodate(page);
1819 } else {
1820 ClearPageUptodate(page);
1821 SetPageError(page);
1822 }
Chris Masond1310b22008-01-24 16:13:08 -05001823 unlock_page(page);
Chris Mason70dec802008-01-29 09:59:12 -05001824 } else {
1825 if (uptodate) {
1826 check_page_uptodate(tree, page);
1827 } else {
1828 ClearPageUptodate(page);
1829 SetPageError(page);
1830 }
Chris Masond1310b22008-01-24 16:13:08 -05001831 check_page_locked(tree, page);
Chris Mason70dec802008-01-29 09:59:12 -05001832 }
Chris Mason4125bf72010-02-03 18:18:45 +00001833 } while (bvec <= bvec_end);
Chris Masond1310b22008-01-24 16:13:08 -05001834
1835 bio_put(bio);
Chris Masond1310b22008-01-24 16:13:08 -05001836}
1837
1838/*
1839 * IO done from prepare_write is pretty simple, we just unlock
1840 * the structs in the extent tree when done, and set the uptodate bits
1841 * as appropriate.
1842 */
Chris Masond1310b22008-01-24 16:13:08 -05001843static void end_bio_extent_preparewrite(struct bio *bio, int err)
Chris Masond1310b22008-01-24 16:13:08 -05001844{
1845 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1846 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
David Woodhouse902b22f2008-08-20 08:51:49 -04001847 struct extent_io_tree *tree;
Chris Masond1310b22008-01-24 16:13:08 -05001848 u64 start;
1849 u64 end;
1850
Chris Masond1310b22008-01-24 16:13:08 -05001851 do {
1852 struct page *page = bvec->bv_page;
David Woodhouse902b22f2008-08-20 08:51:49 -04001853 tree = &BTRFS_I(page->mapping->host)->io_tree;
1854
Chris Masond1310b22008-01-24 16:13:08 -05001855 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1856 bvec->bv_offset;
1857 end = start + bvec->bv_len - 1;
1858
1859 if (--bvec >= bio->bi_io_vec)
1860 prefetchw(&bvec->bv_page->flags);
1861
1862 if (uptodate) {
1863 set_extent_uptodate(tree, start, end, GFP_ATOMIC);
1864 } else {
1865 ClearPageUptodate(page);
1866 SetPageError(page);
1867 }
1868
1869 unlock_extent(tree, start, end, GFP_ATOMIC);
1870
1871 } while (bvec >= bio->bi_io_vec);
1872
1873 bio_put(bio);
Chris Masond1310b22008-01-24 16:13:08 -05001874}
1875
1876static struct bio *
1877extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
1878 gfp_t gfp_flags)
1879{
1880 struct bio *bio;
1881
1882 bio = bio_alloc(gfp_flags, nr_vecs);
1883
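	/*
	 * when allocating from the memory reclaim path, fall back to
	 * smaller and smaller bios instead of failing outright
	 */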
1884 if (bio == NULL && (current->flags & PF_MEMALLOC)) {
1885 while (!bio && (nr_vecs /= 2))
1886 bio = bio_alloc(gfp_flags, nr_vecs);
1887 }
1888
1889 if (bio) {
Chris Masone1c4b742008-04-22 13:26:46 -04001890 bio->bi_size = 0;
Chris Masond1310b22008-01-24 16:13:08 -05001891 bio->bi_bdev = bdev;
1892 bio->bi_sector = first_sector;
1893 }
1894 return bio;
1895}
1896
Chris Masonc8b97812008-10-29 14:49:59 -04001897static int submit_one_bio(int rw, struct bio *bio, int mirror_num,
1898 unsigned long bio_flags)
Chris Masond1310b22008-01-24 16:13:08 -05001899{
Chris Masond1310b22008-01-24 16:13:08 -05001900 int ret = 0;
Chris Mason70dec802008-01-29 09:59:12 -05001901 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1902 struct page *page = bvec->bv_page;
1903 struct extent_io_tree *tree = bio->bi_private;
Chris Mason70dec802008-01-29 09:59:12 -05001904 u64 start;
1905 u64 end;
1906
1907 start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
1908 end = start + bvec->bv_len - 1;
1909
David Woodhouse902b22f2008-08-20 08:51:49 -04001910 bio->bi_private = NULL;
Chris Masond1310b22008-01-24 16:13:08 -05001911
1912 bio_get(bio);
1913
Chris Mason065631f2008-02-20 12:07:25 -05001914 if (tree->ops && tree->ops->submit_bio_hook)
Chris Masonf1885912008-04-09 16:28:12 -04001915 tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
Chris Masonc8b97812008-10-29 14:49:59 -04001916 mirror_num, bio_flags);
Chris Mason0b86a832008-03-24 15:01:56 -04001917 else
1918 submit_bio(rw, bio);
Chris Masond1310b22008-01-24 16:13:08 -05001919 if (bio_flagged(bio, BIO_EOPNOTSUPP))
1920 ret = -EOPNOTSUPP;
1921 bio_put(bio);
1922 return ret;
1923}
1924
1925static int submit_extent_page(int rw, struct extent_io_tree *tree,
1926 struct page *page, sector_t sector,
1927 size_t size, unsigned long offset,
1928 struct block_device *bdev,
1929 struct bio **bio_ret,
1930 unsigned long max_pages,
Chris Masonf1885912008-04-09 16:28:12 -04001931 bio_end_io_t end_io_func,
Chris Masonc8b97812008-10-29 14:49:59 -04001932 int mirror_num,
1933 unsigned long prev_bio_flags,
1934 unsigned long bio_flags)
Chris Masond1310b22008-01-24 16:13:08 -05001935{
1936 int ret = 0;
1937 struct bio *bio;
1938 int nr;
Chris Masonc8b97812008-10-29 14:49:59 -04001939 int contig = 0;
1940 int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED;
1941 int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
Chris Mason5b050f02008-11-11 09:34:41 -05001942 size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE);
Chris Masond1310b22008-01-24 16:13:08 -05001943
1944 if (bio_ret && *bio_ret) {
1945 bio = *bio_ret;
Chris Masonc8b97812008-10-29 14:49:59 -04001946 if (old_compressed)
1947 contig = bio->bi_sector == sector;
1948 else
1949 contig = bio->bi_sector + (bio->bi_size >> 9) ==
1950 sector;
1951
1952 if (prev_bio_flags != bio_flags || !contig ||
Chris Mason239b14b2008-03-24 15:02:07 -04001953 (tree->ops && tree->ops->merge_bio_hook &&
Chris Masonc8b97812008-10-29 14:49:59 -04001954 tree->ops->merge_bio_hook(page, offset, page_size, bio,
1955 bio_flags)) ||
1956 bio_add_page(bio, page, page_size, offset) < page_size) {
1957 ret = submit_one_bio(rw, bio, mirror_num,
1958 prev_bio_flags);
Chris Masond1310b22008-01-24 16:13:08 -05001959 bio = NULL;
1960 } else {
1961 return 0;
1962 }
1963 }
Chris Masonc8b97812008-10-29 14:49:59 -04001964 if (this_compressed)
1965 nr = BIO_MAX_PAGES;
1966 else
1967 nr = bio_get_nr_vecs(bdev);
1968
Chris Masond1310b22008-01-24 16:13:08 -05001969 bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
Chris Mason70dec802008-01-29 09:59:12 -05001970
Chris Masonc8b97812008-10-29 14:49:59 -04001971 bio_add_page(bio, page, page_size, offset);
Chris Masond1310b22008-01-24 16:13:08 -05001972 bio->bi_end_io = end_io_func;
1973 bio->bi_private = tree;
Chris Mason70dec802008-01-29 09:59:12 -05001974
Chris Masond3977122009-01-05 21:25:51 -05001975 if (bio_ret)
Chris Masond1310b22008-01-24 16:13:08 -05001976 *bio_ret = bio;
Chris Masond3977122009-01-05 21:25:51 -05001977 else
Chris Masonc8b97812008-10-29 14:49:59 -04001978 ret = submit_one_bio(rw, bio, mirror_num, bio_flags);
Chris Masond1310b22008-01-24 16:13:08 -05001979
1980 return ret;
1981}
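
/*
 * Illustrative sketch, not part of the original file: callers thread one
 * bio pointer through repeated submit_extent_page() calls so contiguous
 * pages get merged into a single bio, then submit whatever is left over
 * themselves -- the same pattern extent_read_full_page() uses below.
 * example_read_two_pages() and its fixed max_pages of 2 are made up.
 */
static inline int example_read_two_pages(struct extent_io_tree *tree,
					 struct page *p1, struct page *p2,
					 sector_t sector,
					 struct block_device *bdev)
{
	struct bio *bio = NULL;
	int ret;

	/* first call allocates the bio and parks it in 'bio' */
	ret = submit_extent_page(READ, tree, p1, sector, PAGE_CACHE_SIZE, 0,
				 bdev, &bio, 2, end_bio_extent_readpage,
				 0, 0, 0);
	if (ret)
		return ret;
	/* contiguous on disk, so this page lands in the same bio */
	ret = submit_extent_page(READ, tree, p2,
				 sector + (PAGE_CACHE_SIZE >> 9),
				 PAGE_CACHE_SIZE, 0, bdev, &bio, 2,
				 end_bio_extent_readpage, 0, 0, 0);
	if (ret)
		return ret;
	/* anything still batched is the caller's job to submit */
	if (bio)
		ret = submit_one_bio(READ, bio, 0, 0);
	return ret;
}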
1982
1983void set_page_extent_mapped(struct page *page)
1984{
1985 if (!PagePrivate(page)) {
1986 SetPagePrivate(page);
Chris Masond1310b22008-01-24 16:13:08 -05001987 page_cache_get(page);
Chris Mason6af118c2008-07-22 11:18:07 -04001988 set_page_private(page, EXTENT_PAGE_PRIVATE);
Chris Masond1310b22008-01-24 16:13:08 -05001989 }
1990}
1991
Christoph Hellwigb2950862008-12-02 09:54:17 -05001992static void set_page_extent_head(struct page *page, unsigned long len)
Chris Masond1310b22008-01-24 16:13:08 -05001993{
1994 set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
1995}
1996
1997/*
1998 * basic readpage implementation. Locked extent state structs are inserted
1999 * into the tree that are removed when the IO is done (by the end_io
2000 * handlers)
2001 */
2002static int __extent_read_full_page(struct extent_io_tree *tree,
2003 struct page *page,
2004 get_extent_t *get_extent,
Chris Masonc8b97812008-10-29 14:49:59 -04002005 struct bio **bio, int mirror_num,
2006 unsigned long *bio_flags)
Chris Masond1310b22008-01-24 16:13:08 -05002007{
2008 struct inode *inode = page->mapping->host;
2009 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2010 u64 page_end = start + PAGE_CACHE_SIZE - 1;
2011 u64 end;
2012 u64 cur = start;
2013 u64 extent_offset;
2014 u64 last_byte = i_size_read(inode);
2015 u64 block_start;
2016 u64 cur_end;
2017 sector_t sector;
2018 struct extent_map *em;
2019 struct block_device *bdev;
2020 int ret;
2021 int nr = 0;
2022 size_t page_offset = 0;
2023 size_t iosize;
Chris Masonc8b97812008-10-29 14:49:59 -04002024 size_t disk_io_size;
Chris Masond1310b22008-01-24 16:13:08 -05002025 size_t blocksize = inode->i_sb->s_blocksize;
Chris Masonc8b97812008-10-29 14:49:59 -04002026 unsigned long this_bio_flag = 0;
Chris Masond1310b22008-01-24 16:13:08 -05002027
2028 set_page_extent_mapped(page);
2029
2030 end = page_end;
2031 lock_extent(tree, start, end, GFP_NOFS);
2032
Chris Masonc8b97812008-10-29 14:49:59 -04002033 if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
2034 char *userpage;
2035 size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);
2036
2037 if (zero_offset) {
2038 iosize = PAGE_CACHE_SIZE - zero_offset;
2039 userpage = kmap_atomic(page, KM_USER0);
2040 memset(userpage + zero_offset, 0, iosize);
2041 flush_dcache_page(page);
2042 kunmap_atomic(userpage, KM_USER0);
2043 }
2044 }
Chris Masond1310b22008-01-24 16:13:08 -05002045 while (cur <= end) {
2046 if (cur >= last_byte) {
2047 char *userpage;
2048 iosize = PAGE_CACHE_SIZE - page_offset;
2049 userpage = kmap_atomic(page, KM_USER0);
2050 memset(userpage + page_offset, 0, iosize);
2051 flush_dcache_page(page);
2052 kunmap_atomic(userpage, KM_USER0);
2053 set_extent_uptodate(tree, cur, cur + iosize - 1,
2054 GFP_NOFS);
2055 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2056 break;
2057 }
2058 em = get_extent(inode, page, page_offset, cur,
2059 end - cur + 1, 0);
2060 if (IS_ERR(em) || !em) {
2061 SetPageError(page);
2062 unlock_extent(tree, cur, end, GFP_NOFS);
2063 break;
2064 }
Chris Masond1310b22008-01-24 16:13:08 -05002065 extent_offset = cur - em->start;
2066 BUG_ON(extent_map_end(em) <= cur);
2067 BUG_ON(end < cur);
2068
Chris Masonc8b97812008-10-29 14:49:59 -04002069 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
2070 this_bio_flag = EXTENT_BIO_COMPRESSED;
2071
Chris Masond1310b22008-01-24 16:13:08 -05002072 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2073 cur_end = min(extent_map_end(em) - 1, end);
2074 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
Chris Masonc8b97812008-10-29 14:49:59 -04002075 if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
2076 disk_io_size = em->block_len;
2077 sector = em->block_start >> 9;
2078 } else {
2079 sector = (em->block_start + extent_offset) >> 9;
2080 disk_io_size = iosize;
2081 }
Chris Masond1310b22008-01-24 16:13:08 -05002082 bdev = em->bdev;
2083 block_start = em->block_start;
Yan Zhengd899e052008-10-30 14:25:28 -04002084 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
2085 block_start = EXTENT_MAP_HOLE;
Chris Masond1310b22008-01-24 16:13:08 -05002086 free_extent_map(em);
2087 em = NULL;
2088
2089 /* we've found a hole, just zero and go on */
2090 if (block_start == EXTENT_MAP_HOLE) {
2091 char *userpage;
2092 userpage = kmap_atomic(page, KM_USER0);
2093 memset(userpage + page_offset, 0, iosize);
2094 flush_dcache_page(page);
2095 kunmap_atomic(userpage, KM_USER0);
2096
2097 set_extent_uptodate(tree, cur, cur + iosize - 1,
2098 GFP_NOFS);
2099 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2100 cur = cur + iosize;
2101 page_offset += iosize;
2102 continue;
2103 }
2104 /* the get_extent function already copied into the page */
Chris Mason9655d292009-09-02 15:22:30 -04002105 if (test_range_bit(tree, cur, cur_end,
2106 EXTENT_UPTODATE, 1, NULL)) {
Chris Masona1b32a52008-09-05 16:09:51 -04002107 check_page_uptodate(tree, page);
Chris Masond1310b22008-01-24 16:13:08 -05002108 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2109 cur = cur + iosize;
2110 page_offset += iosize;
2111 continue;
2112 }
Chris Mason70dec802008-01-29 09:59:12 -05002113 /* we have an inline extent but it didn't get marked up
2114 * to date. Error out
2115 */
2116 if (block_start == EXTENT_MAP_INLINE) {
2117 SetPageError(page);
2118 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2119 cur = cur + iosize;
2120 page_offset += iosize;
2121 continue;
2122 }
Chris Masond1310b22008-01-24 16:13:08 -05002123
2124 ret = 0;
2125 if (tree->ops && tree->ops->readpage_io_hook) {
2126 ret = tree->ops->readpage_io_hook(page, cur,
2127 cur + iosize - 1);
2128 }
2129 if (!ret) {
Chris Mason89642222008-07-24 09:41:53 -04002130 unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
2131 pnr -= page->index;
Chris Masond1310b22008-01-24 16:13:08 -05002132 ret = submit_extent_page(READ, tree, page,
Chris Masonc8b97812008-10-29 14:49:59 -04002133 sector, disk_io_size, page_offset,
Chris Mason89642222008-07-24 09:41:53 -04002134 bdev, bio, pnr,
Chris Masonc8b97812008-10-29 14:49:59 -04002135 end_bio_extent_readpage, mirror_num,
2136 *bio_flags,
2137 this_bio_flag);
Chris Mason89642222008-07-24 09:41:53 -04002138 nr++;
Chris Masonc8b97812008-10-29 14:49:59 -04002139 *bio_flags = this_bio_flag;
Chris Masond1310b22008-01-24 16:13:08 -05002140 }
2141 if (ret)
2142 SetPageError(page);
2143 cur = cur + iosize;
2144 page_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05002145 }
2146 if (!nr) {
2147 if (!PageError(page))
2148 SetPageUptodate(page);
2149 unlock_page(page);
2150 }
2151 return 0;
2152}
2153
2154int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
2155 get_extent_t *get_extent)
2156{
2157 struct bio *bio = NULL;
Chris Masonc8b97812008-10-29 14:49:59 -04002158 unsigned long bio_flags = 0;
Chris Masond1310b22008-01-24 16:13:08 -05002159 int ret;
2160
Chris Masonc8b97812008-10-29 14:49:59 -04002161 ret = __extent_read_full_page(tree, page, get_extent, &bio, 0,
2162 &bio_flags);
Chris Masond1310b22008-01-24 16:13:08 -05002163 if (bio)
Chris Masonc8b97812008-10-29 14:49:59 -04002164 submit_one_bio(READ, bio, 0, bio_flags);
Chris Masond1310b22008-01-24 16:13:08 -05002165 return ret;
2166}
Chris Masond1310b22008-01-24 16:13:08 -05002167
Chris Mason11c83492009-04-20 15:50:09 -04002168static noinline void update_nr_written(struct page *page,
2169 struct writeback_control *wbc,
2170 unsigned long nr_written)
2171{
2172 wbc->nr_to_write -= nr_written;
2173 if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
2174 wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
2175 page->mapping->writeback_index = page->index + nr_written;
2176}
2177
Chris Masond1310b22008-01-24 16:13:08 -05002178/*
2179 * the writepage semantics are similar to regular writepage. extent
2180 * records are inserted to lock ranges in the tree, and as dirty areas
2181 * are found, they are marked writeback. Then the lock bits are removed
2182 * and the end_io handler clears the writeback ranges
2183 */
2184static int __extent_writepage(struct page *page, struct writeback_control *wbc,
2185 void *data)
2186{
2187 struct inode *inode = page->mapping->host;
2188 struct extent_page_data *epd = data;
2189 struct extent_io_tree *tree = epd->tree;
2190 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2191 u64 delalloc_start;
2192 u64 page_end = start + PAGE_CACHE_SIZE - 1;
2193 u64 end;
2194 u64 cur = start;
2195 u64 extent_offset;
2196 u64 last_byte = i_size_read(inode);
2197 u64 block_start;
2198 u64 iosize;
Chris Masone6dcd2d2008-07-17 12:53:50 -04002199 u64 unlock_start;
Chris Masond1310b22008-01-24 16:13:08 -05002200 sector_t sector;
Chris Mason2c64c532009-09-02 15:04:12 -04002201 struct extent_state *cached_state = NULL;
Chris Masond1310b22008-01-24 16:13:08 -05002202 struct extent_map *em;
2203 struct block_device *bdev;
2204 int ret;
2205 int nr = 0;
Chris Mason7f3c74f2008-07-18 12:01:11 -04002206 size_t pg_offset = 0;
Chris Masond1310b22008-01-24 16:13:08 -05002207 size_t blocksize;
2208 loff_t i_size = i_size_read(inode);
2209 unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
2210 u64 nr_delalloc;
2211 u64 delalloc_end;
Chris Masonc8b97812008-10-29 14:49:59 -04002212 int page_started;
2213 int compressed;
Chris Masonffbd5172009-04-20 15:50:09 -04002214 int write_flags;
Chris Mason771ed682008-11-06 22:02:51 -05002215 unsigned long nr_written = 0;
Chris Masond1310b22008-01-24 16:13:08 -05002216
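	/* WB_SYNC_ALL means data integrity: use synchronous, plugged writes */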
Chris Masonffbd5172009-04-20 15:50:09 -04002217 if (wbc->sync_mode == WB_SYNC_ALL)
2218 write_flags = WRITE_SYNC_PLUG;
2219 else
2220 write_flags = WRITE;
2221
Chris Masond1310b22008-01-24 16:13:08 -05002222 WARN_ON(!PageLocked(page));
Chris Mason7f3c74f2008-07-18 12:01:11 -04002223 pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
Chris Mason211c17f2008-05-15 09:13:45 -04002224 if (page->index > end_index ||
Chris Mason7f3c74f2008-07-18 12:01:11 -04002225 (page->index == end_index && !pg_offset)) {
Chris Mason39be25c2008-11-10 11:50:50 -05002226 page->mapping->a_ops->invalidatepage(page, 0);
Chris Masond1310b22008-01-24 16:13:08 -05002227 unlock_page(page);
2228 return 0;
2229 }
2230
2231 if (page->index == end_index) {
2232 char *userpage;
2233
Chris Masond1310b22008-01-24 16:13:08 -05002234 userpage = kmap_atomic(page, KM_USER0);
Chris Mason7f3c74f2008-07-18 12:01:11 -04002235 memset(userpage + pg_offset, 0,
2236 PAGE_CACHE_SIZE - pg_offset);
Chris Masond1310b22008-01-24 16:13:08 -05002237 kunmap_atomic(userpage, KM_USER0);
Chris Mason211c17f2008-05-15 09:13:45 -04002238 flush_dcache_page(page);
Chris Masond1310b22008-01-24 16:13:08 -05002239 }
Chris Mason7f3c74f2008-07-18 12:01:11 -04002240 pg_offset = 0;
Chris Masond1310b22008-01-24 16:13:08 -05002241
2242 set_page_extent_mapped(page);
2243
2244 delalloc_start = start;
2245 delalloc_end = 0;
Chris Masonc8b97812008-10-29 14:49:59 -04002246 page_started = 0;
Chris Mason771ed682008-11-06 22:02:51 -05002247 if (!epd->extent_locked) {
Chris Masonf85d7d6c2009-09-18 16:03:16 -04002248 u64 delalloc_to_write = 0;
Chris Mason11c83492009-04-20 15:50:09 -04002249 /*
2250 * make sure the wbc mapping index is at least updated
2251 * to this page.
2252 */
2253 update_nr_written(page, wbc, 0);
2254
Chris Masond3977122009-01-05 21:25:51 -05002255 while (delalloc_end < page_end) {
Chris Mason771ed682008-11-06 22:02:51 -05002256 nr_delalloc = find_lock_delalloc_range(inode, tree,
Chris Masonc8b97812008-10-29 14:49:59 -04002257 page,
2258 &delalloc_start,
Chris Masond1310b22008-01-24 16:13:08 -05002259 &delalloc_end,
2260 128 * 1024 * 1024);
Chris Mason771ed682008-11-06 22:02:51 -05002261 if (nr_delalloc == 0) {
2262 delalloc_start = delalloc_end + 1;
2263 continue;
2264 }
2265 tree->ops->fill_delalloc(inode, page, delalloc_start,
2266 delalloc_end, &page_started,
2267 &nr_written);
Chris Masonf85d7d6c2009-09-18 16:03:16 -04002268 /*
2269 * delalloc_end is already one less than the total
2270 * length, so we don't subtract one from
2271 * PAGE_CACHE_SIZE
2272 */
2273 delalloc_to_write += (delalloc_end - delalloc_start +
2274 PAGE_CACHE_SIZE) >>
2275 PAGE_CACHE_SHIFT;
Chris Masond1310b22008-01-24 16:13:08 -05002276 delalloc_start = delalloc_end + 1;
Chris Masond1310b22008-01-24 16:13:08 -05002277 }
Chris Masonf85d7d6c2009-09-18 16:03:16 -04002278 if (wbc->nr_to_write < delalloc_to_write) {
2279 int thresh = 8192;
2280
2281 if (delalloc_to_write < thresh * 2)
2282 thresh = delalloc_to_write;
2283 wbc->nr_to_write = min_t(u64, delalloc_to_write,
2284 thresh);
2285 }
Chris Masonc8b97812008-10-29 14:49:59 -04002286
Chris Mason771ed682008-11-06 22:02:51 -05002287 /* did the fill delalloc function already unlock and start
2288 * the IO?
2289 */
2290 if (page_started) {
2291 ret = 0;
Chris Mason11c83492009-04-20 15:50:09 -04002292 /*
2293 * we've unlocked the page, so we can't update
2294 * the mapping's writeback index, just update
2295 * nr_to_write.
2296 */
2297 wbc->nr_to_write -= nr_written;
2298 goto done_unlocked;
Chris Mason771ed682008-11-06 22:02:51 -05002299 }
Chris Masonc8b97812008-10-29 14:49:59 -04002300 }
Chris Mason247e7432008-07-17 12:53:51 -04002301 if (tree->ops && tree->ops->writepage_start_hook) {
Chris Masonc8b97812008-10-29 14:49:59 -04002302 ret = tree->ops->writepage_start_hook(page, start,
2303 page_end);
Chris Mason247e7432008-07-17 12:53:51 -04002304 if (ret == -EAGAIN) {
Chris Mason247e7432008-07-17 12:53:51 -04002305 redirty_page_for_writepage(wbc, page);
Chris Mason11c83492009-04-20 15:50:09 -04002306 update_nr_written(page, wbc, nr_written);
Chris Mason247e7432008-07-17 12:53:51 -04002307 unlock_page(page);
Chris Mason771ed682008-11-06 22:02:51 -05002308 ret = 0;
Chris Mason11c83492009-04-20 15:50:09 -04002309 goto done_unlocked;
Chris Mason247e7432008-07-17 12:53:51 -04002310 }
2311 }
2312
Chris Mason11c83492009-04-20 15:50:09 -04002313 /*
2314 * we don't want to touch the inode after unlocking the page,
2315 * so we update the mapping writeback index now
2316 */
2317 update_nr_written(page, wbc, nr_written + 1);
Chris Mason771ed682008-11-06 22:02:51 -05002318
Chris Masond1310b22008-01-24 16:13:08 -05002319 end = page_end;
Chris Masond1310b22008-01-24 16:13:08 -05002320 if (last_byte <= start) {
Chris Masone6dcd2d2008-07-17 12:53:50 -04002321 if (tree->ops && tree->ops->writepage_end_io_hook)
2322 tree->ops->writepage_end_io_hook(page, start,
2323 page_end, NULL, 1);
2324 unlock_start = page_end + 1;
Chris Masond1310b22008-01-24 16:13:08 -05002325 goto done;
2326 }
2327
Chris Masond1310b22008-01-24 16:13:08 -05002328 blocksize = inode->i_sb->s_blocksize;
2329
2330 while (cur <= end) {
2331 if (cur >= last_byte) {
Chris Masone6dcd2d2008-07-17 12:53:50 -04002332 if (tree->ops && tree->ops->writepage_end_io_hook)
2333 tree->ops->writepage_end_io_hook(page, cur,
2334 page_end, NULL, 1);
2335 unlock_start = page_end + 1;
Chris Masond1310b22008-01-24 16:13:08 -05002336 break;
2337 }
Chris Mason7f3c74f2008-07-18 12:01:11 -04002338 em = epd->get_extent(inode, page, pg_offset, cur,
Chris Masond1310b22008-01-24 16:13:08 -05002339 end - cur + 1, 1);
2340 if (IS_ERR(em) || !em) {
2341 SetPageError(page);
2342 break;
2343 }
2344
2345 extent_offset = cur - em->start;
2346 BUG_ON(extent_map_end(em) <= cur);
2347 BUG_ON(end < cur);
2348 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2349 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2350 sector = (em->block_start + extent_offset) >> 9;
2351 bdev = em->bdev;
2352 block_start = em->block_start;
Chris Masonc8b97812008-10-29 14:49:59 -04002353 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
Chris Masond1310b22008-01-24 16:13:08 -05002354 free_extent_map(em);
2355 em = NULL;
2356
Chris Masonc8b97812008-10-29 14:49:59 -04002357 /*
2358 * compressed and inline extents are written through other
2359 * paths in the FS
2360 */
2361 if (compressed || block_start == EXTENT_MAP_HOLE ||
Chris Masond1310b22008-01-24 16:13:08 -05002362 block_start == EXTENT_MAP_INLINE) {
Chris Masonc8b97812008-10-29 14:49:59 -04002363 /*
2364 * end_io notification does not happen here for
2365 * compressed extents
2366 */
2367 if (!compressed && tree->ops &&
2368 tree->ops->writepage_end_io_hook)
Chris Masone6dcd2d2008-07-17 12:53:50 -04002369 tree->ops->writepage_end_io_hook(page, cur,
2370 cur + iosize - 1,
2371 NULL, 1);
Chris Masonc8b97812008-10-29 14:49:59 -04002372 else if (compressed) {
2373 /* we don't want to end_page_writeback on
2374 * a compressed extent. this happens
2375 * elsewhere
2376 */
2377 nr++;
2378 }
2379
2380 cur += iosize;
Chris Mason7f3c74f2008-07-18 12:01:11 -04002381 pg_offset += iosize;
Chris Masone6dcd2d2008-07-17 12:53:50 -04002382 unlock_start = cur;
Chris Masond1310b22008-01-24 16:13:08 -05002383 continue;
2384 }
Chris Masond1310b22008-01-24 16:13:08 -05002385 /* leave this out until we have a page_mkwrite call */
2386 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
Chris Mason9655d292009-09-02 15:22:30 -04002387 EXTENT_DIRTY, 0, NULL)) {
Chris Masond1310b22008-01-24 16:13:08 -05002388 cur = cur + iosize;
Chris Mason7f3c74f2008-07-18 12:01:11 -04002389 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05002390 continue;
2391 }
Chris Masonc8b97812008-10-29 14:49:59 -04002392
Chris Masond1310b22008-01-24 16:13:08 -05002393 if (tree->ops && tree->ops->writepage_io_hook) {
2394 ret = tree->ops->writepage_io_hook(page, cur,
2395 cur + iosize - 1);
2396 } else {
2397 ret = 0;
2398 }
Chris Mason1259ab72008-05-12 13:39:03 -04002399 if (ret) {
Chris Masond1310b22008-01-24 16:13:08 -05002400 SetPageError(page);
Chris Mason1259ab72008-05-12 13:39:03 -04002401 } else {
Chris Masond1310b22008-01-24 16:13:08 -05002402 unsigned long max_nr = end_index + 1;
Chris Mason7f3c74f2008-07-18 12:01:11 -04002403
Chris Masond1310b22008-01-24 16:13:08 -05002404 set_range_writeback(tree, cur, cur + iosize - 1);
2405 if (!PageWriteback(page)) {
Chris Masond3977122009-01-05 21:25:51 -05002406 printk(KERN_ERR "btrfs warning page %lu not "
2407 "writeback, cur %llu end %llu\n",
2408 page->index, (unsigned long long)cur,
Chris Masond1310b22008-01-24 16:13:08 -05002409 (unsigned long long)end);
2410 }
2411
Chris Masonffbd5172009-04-20 15:50:09 -04002412 ret = submit_extent_page(write_flags, tree, page,
2413 sector, iosize, pg_offset,
2414 bdev, &epd->bio, max_nr,
Chris Masonc8b97812008-10-29 14:49:59 -04002415 end_bio_extent_writepage,
2416 0, 0, 0);
Chris Masond1310b22008-01-24 16:13:08 -05002417 if (ret)
2418 SetPageError(page);
2419 }
2420 cur = cur + iosize;
Chris Mason7f3c74f2008-07-18 12:01:11 -04002421 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05002422 nr++;
2423 }
2424done:
2425 if (nr == 0) {
2426 /* make sure the mapping tag for page dirty gets cleared */
2427 set_page_writeback(page);
2428 end_page_writeback(page);
2429 }
Chris Masond1310b22008-01-24 16:13:08 -05002430 unlock_page(page);
Chris Mason771ed682008-11-06 22:02:51 -05002431
Chris Mason11c83492009-04-20 15:50:09 -04002432done_unlocked:
2433
Chris Mason2c64c532009-09-02 15:04:12 -04002434 /* drop our reference on any cached states */
2435 free_extent_state(cached_state);
Chris Masond1310b22008-01-24 16:13:08 -05002436 return 0;
2437}
2438
Chris Masond1310b22008-01-24 16:13:08 -05002439/**
Chris Mason4bef0842008-09-08 11:18:08 -04002440 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
Chris Masond1310b22008-01-24 16:13:08 -05002441 * @mapping: address space structure to write
2442 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2443 * @writepage: function called for each page
2444 * @data: data passed to writepage function
2445 *
2446 * If a page is already under I/O, write_cache_pages() skips it, even
2447 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
2448 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
2449 * and msync() need to guarantee that all the data which was dirty at the time
2450 * the call was made get new I/O started against them. If wbc->sync_mode is
2451 * WB_SYNC_ALL then we were called for data integrity and we must wait for
2452 * existing IO to complete.
2453 */
Christoph Hellwigb2950862008-12-02 09:54:17 -05002454static int extent_write_cache_pages(struct extent_io_tree *tree,
Chris Mason4bef0842008-09-08 11:18:08 -04002455 struct address_space *mapping,
2456 struct writeback_control *wbc,
Chris Masond2c3f4f2008-11-19 12:44:22 -05002457 writepage_t writepage, void *data,
2458 void (*flush_fn)(void *))
Chris Masond1310b22008-01-24 16:13:08 -05002459{
Chris Masond1310b22008-01-24 16:13:08 -05002460 int ret = 0;
2461 int done = 0;
Chris Masonf85d7d6c2009-09-18 16:03:16 -04002462 int nr_to_write_done = 0;
Chris Masond1310b22008-01-24 16:13:08 -05002463 struct pagevec pvec;
2464 int nr_pages;
2465 pgoff_t index;
2466 pgoff_t end; /* Inclusive */
2467 int scanned = 0;
2468 int range_whole = 0;
2469
Chris Masond1310b22008-01-24 16:13:08 -05002470 pagevec_init(&pvec, 0);
2471 if (wbc->range_cyclic) {
2472 index = mapping->writeback_index; /* Start from prev offset */
2473 end = -1;
2474 } else {
2475 index = wbc->range_start >> PAGE_CACHE_SHIFT;
2476 end = wbc->range_end >> PAGE_CACHE_SHIFT;
2477 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2478 range_whole = 1;
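		/* an explicit range never wraps back to index 0,
		 * so skip the retry pass below */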
2479 scanned = 1;
2480 }
2481retry:
Chris Masonf85d7d6c2009-09-18 16:03:16 -04002482 while (!done && !nr_to_write_done && (index <= end) &&
Chris Masond1310b22008-01-24 16:13:08 -05002483 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
Chris Masond3977122009-01-05 21:25:51 -05002484 PAGECACHE_TAG_DIRTY, min(end - index,
2485 (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
Chris Masond1310b22008-01-24 16:13:08 -05002486 unsigned i;
2487
2488 scanned = 1;
2489 for (i = 0; i < nr_pages; i++) {
2490 struct page *page = pvec.pages[i];
2491
2492 /*
2493 * At this point we hold neither mapping->tree_lock nor
2494 * lock on the page itself: the page may be truncated or
2495 * invalidated (changing page->mapping to NULL), or even
2496 * swizzled back from swapper_space to tmpfs file
2497 * mapping
2498 */
Chris Mason4bef0842008-09-08 11:18:08 -04002499 if (tree->ops && tree->ops->write_cache_pages_lock_hook)
2500 tree->ops->write_cache_pages_lock_hook(page);
2501 else
2502 lock_page(page);
Chris Masond1310b22008-01-24 16:13:08 -05002503
2504 if (unlikely(page->mapping != mapping)) {
2505 unlock_page(page);
2506 continue;
2507 }
2508
2509 if (!wbc->range_cyclic && page->index > end) {
2510 done = 1;
2511 unlock_page(page);
2512 continue;
2513 }
2514
Chris Masond2c3f4f2008-11-19 12:44:22 -05002515 if (wbc->sync_mode != WB_SYNC_NONE) {
Chris Mason0e6bd952008-11-20 10:46:35 -05002516 if (PageWriteback(page))
2517 flush_fn(data);
Chris Masond1310b22008-01-24 16:13:08 -05002518 wait_on_page_writeback(page);
Chris Masond2c3f4f2008-11-19 12:44:22 -05002519 }
Chris Masond1310b22008-01-24 16:13:08 -05002520
2521 if (PageWriteback(page) ||
2522 !clear_page_dirty_for_io(page)) {
2523 unlock_page(page);
2524 continue;
2525 }
2526
2527 ret = (*writepage)(page, wbc, data);
2528
2529 if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
2530 unlock_page(page);
2531 ret = 0;
2532 }
Chris Masonf85d7d6c2009-09-18 16:03:16 -04002533 if (ret)
Chris Masond1310b22008-01-24 16:13:08 -05002534 done = 1;
Chris Masonf85d7d6c2009-09-18 16:03:16 -04002535
2536 /*
2537 * the filesystem may choose to bump up nr_to_write.
2538 * We have to make sure to honor the new nr_to_write
2539 * at any time
2540 */
2541 nr_to_write_done = wbc->nr_to_write <= 0;
Chris Masond1310b22008-01-24 16:13:08 -05002542 }
2543 pagevec_release(&pvec);
2544 cond_resched();
2545 }
2546 if (!scanned && !done) {
2547 /*
2548 * We hit the last page and there is more work to be done: wrap
2549 * back to the start of the file
2550 */
2551 scanned = 1;
2552 index = 0;
2553 goto retry;
2554 }
Chris Masond1310b22008-01-24 16:13:08 -05002555 return ret;
2556}
Chris Masond1310b22008-01-24 16:13:08 -05002557
Chris Masonffbd5172009-04-20 15:50:09 -04002558static void flush_epd_write_bio(struct extent_page_data *epd)
2559{
2560 if (epd->bio) {
2561 if (epd->sync_io)
2562 submit_one_bio(WRITE_SYNC, epd->bio, 0, 0);
2563 else
2564 submit_one_bio(WRITE, epd->bio, 0, 0);
2565 epd->bio = NULL;
2566 }
2567}
2568
Chris Masond2c3f4f2008-11-19 12:44:22 -05002569static noinline void flush_write_bio(void *data)
2570{
2571 struct extent_page_data *epd = data;
Chris Masonffbd5172009-04-20 15:50:09 -04002572 flush_epd_write_bio(epd);
Chris Masond2c3f4f2008-11-19 12:44:22 -05002573}
2574
Chris Masond1310b22008-01-24 16:13:08 -05002575int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
2576 get_extent_t *get_extent,
2577 struct writeback_control *wbc)
2578{
2579 int ret;
2580 struct address_space *mapping = page->mapping;
2581 struct extent_page_data epd = {
2582 .bio = NULL,
2583 .tree = tree,
2584 .get_extent = get_extent,
Chris Mason771ed682008-11-06 22:02:51 -05002585 .extent_locked = 0,
Chris Masonffbd5172009-04-20 15:50:09 -04002586 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
Chris Masond1310b22008-01-24 16:13:08 -05002587 };
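	/*
	 * after the target page itself, opportunistically push out up to
	 * 64 more dirty pages that follow it in the file
	 */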
2588 struct writeback_control wbc_writepages = {
2589 .bdi = wbc->bdi,
Chris Masond313d7a2009-04-20 15:50:09 -04002590 .sync_mode = wbc->sync_mode,
Chris Masond1310b22008-01-24 16:13:08 -05002591 .older_than_this = NULL,
2592 .nr_to_write = 64,
2593 .range_start = page_offset(page) + PAGE_CACHE_SIZE,
2594 .range_end = (loff_t)-1,
2595 };
2596
Chris Masond1310b22008-01-24 16:13:08 -05002597 ret = __extent_writepage(page, wbc, &epd);
2598
Chris Mason4bef0842008-09-08 11:18:08 -04002599 extent_write_cache_pages(tree, mapping, &wbc_writepages,
Chris Masond2c3f4f2008-11-19 12:44:22 -05002600 __extent_writepage, &epd, flush_write_bio);
Chris Masonffbd5172009-04-20 15:50:09 -04002601 flush_epd_write_bio(&epd);
Chris Masond1310b22008-01-24 16:13:08 -05002602 return ret;
2603}
Chris Masond1310b22008-01-24 16:13:08 -05002604
Chris Mason771ed682008-11-06 22:02:51 -05002605int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
2606 u64 start, u64 end, get_extent_t *get_extent,
2607 int mode)
2608{
2609 int ret = 0;
2610 struct address_space *mapping = inode->i_mapping;
2611 struct page *page;
2612 unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >>
2613 PAGE_CACHE_SHIFT;
2614
2615 struct extent_page_data epd = {
2616 .bio = NULL,
2617 .tree = tree,
2618 .get_extent = get_extent,
2619 .extent_locked = 1,
Chris Masonffbd5172009-04-20 15:50:09 -04002620 .sync_io = mode == WB_SYNC_ALL,
Chris Mason771ed682008-11-06 22:02:51 -05002621 };
2622 struct writeback_control wbc_writepages = {
2623 .bdi = inode->i_mapping->backing_dev_info,
2624 .sync_mode = mode,
2625 .older_than_this = NULL,
2626 .nr_to_write = nr_pages * 2,
2627 .range_start = start,
2628 .range_end = end + 1,
2629 };
2630
Chris Masond3977122009-01-05 21:25:51 -05002631 while (start <= end) {
Chris Mason771ed682008-11-06 22:02:51 -05002632 page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
2633 if (clear_page_dirty_for_io(page))
2634 ret = __extent_writepage(page, &wbc_writepages, &epd);
2635 else {
2636 if (tree->ops && tree->ops->writepage_end_io_hook)
2637 tree->ops->writepage_end_io_hook(page, start,
2638 start + PAGE_CACHE_SIZE - 1,
2639 NULL, 1);
2640 unlock_page(page);
2641 }
2642 page_cache_release(page);
2643 start += PAGE_CACHE_SIZE;
2644 }
2645
Chris Masonffbd5172009-04-20 15:50:09 -04002646 flush_epd_write_bio(&epd);
Chris Mason771ed682008-11-06 22:02:51 -05002647 return ret;
2648}
Chris Masond1310b22008-01-24 16:13:08 -05002649
2650int extent_writepages(struct extent_io_tree *tree,
2651 struct address_space *mapping,
2652 get_extent_t *get_extent,
2653 struct writeback_control *wbc)
2654{
2655 int ret = 0;
2656 struct extent_page_data epd = {
2657 .bio = NULL,
2658 .tree = tree,
2659 .get_extent = get_extent,
Chris Mason771ed682008-11-06 22:02:51 -05002660 .extent_locked = 0,
Chris Masonffbd5172009-04-20 15:50:09 -04002661 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
Chris Masond1310b22008-01-24 16:13:08 -05002662 };
2663
Chris Mason4bef0842008-09-08 11:18:08 -04002664 ret = extent_write_cache_pages(tree, mapping, wbc,
Chris Masond2c3f4f2008-11-19 12:44:22 -05002665 __extent_writepage, &epd,
2666 flush_write_bio);
Chris Masonffbd5172009-04-20 15:50:09 -04002667 flush_epd_write_bio(&epd);
Chris Masond1310b22008-01-24 16:13:08 -05002668 return ret;
2669}
Chris Masond1310b22008-01-24 16:13:08 -05002670
2671int extent_readpages(struct extent_io_tree *tree,
2672 struct address_space *mapping,
2673 struct list_head *pages, unsigned nr_pages,
2674 get_extent_t get_extent)
2675{
2676 struct bio *bio = NULL;
2677 unsigned page_idx;
Chris Masonc8b97812008-10-29 14:49:59 -04002678 unsigned long bio_flags = 0;
Chris Masond1310b22008-01-24 16:13:08 -05002679
Chris Masond1310b22008-01-24 16:13:08 -05002680 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
2681 struct page *page = list_entry(pages->prev, struct page, lru);
2682
2683 prefetchw(&page->flags);
2684 list_del(&page->lru);
Nick Piggin28ecb6092010-03-17 13:31:04 +00002685 if (!add_to_page_cache_lru(page, mapping,
Chris Masond1310b22008-01-24 16:13:08 -05002686 page->index, GFP_KERNEL)) {
Chris Masonf1885912008-04-09 16:28:12 -04002687 __extent_read_full_page(tree, page, get_extent,
Chris Masonc8b97812008-10-29 14:49:59 -04002688 &bio, 0, &bio_flags);
Chris Masond1310b22008-01-24 16:13:08 -05002689 }
2690 page_cache_release(page);
2691 }
Chris Masond1310b22008-01-24 16:13:08 -05002692 BUG_ON(!list_empty(pages));
2693 if (bio)
Chris Masonc8b97812008-10-29 14:49:59 -04002694 submit_one_bio(READ, bio, 0, bio_flags);
Chris Masond1310b22008-01-24 16:13:08 -05002695 return 0;
2696}
Chris Masond1310b22008-01-24 16:13:08 -05002697
2698/*
2699 * basic invalidatepage code, this waits on any locked or writeback
2700 * ranges corresponding to the page, and then deletes any extent state
2701 * records from the tree
2702 */
2703int extent_invalidatepage(struct extent_io_tree *tree,
2704 struct page *page, unsigned long offset)
2705{
Josef Bacik2ac55d42010-02-03 19:33:23 +00002706 struct extent_state *cached_state = NULL;
Chris Masond1310b22008-01-24 16:13:08 -05002707 u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
2708 u64 end = start + PAGE_CACHE_SIZE - 1;
2709 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
2710
Chris Masond3977122009-01-05 21:25:51 -05002711 start += (offset + blocksize - 1) & ~(blocksize - 1);
Chris Masond1310b22008-01-24 16:13:08 -05002712 if (start > end)
2713 return 0;
2714
Josef Bacik2ac55d42010-02-03 19:33:23 +00002715 lock_extent_bits(tree, start, end, 0, &cached_state, GFP_NOFS);
Chris Mason1edbb732009-09-02 13:24:36 -04002716 wait_on_page_writeback(page);
Chris Masond1310b22008-01-24 16:13:08 -05002717 clear_extent_bit(tree, start, end,
Josef Bacik32c00af2009-10-08 13:34:05 -04002718 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
2719 EXTENT_DO_ACCOUNTING,
Josef Bacik2ac55d42010-02-03 19:33:23 +00002720 1, 1, &cached_state, GFP_NOFS);
Chris Masond1310b22008-01-24 16:13:08 -05002721 return 0;
2722}
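
/*
 * Illustrative sketch, not part of the original file: how an
 * address_space ->invalidatepage op can forward into the helper above.
 * btrfs's real callback lives in inode.c and layers ordered-extent
 * handling on top of this; example_invalidatepage() is a made-up name.
 */
static inline void example_invalidatepage(struct page *page,
					  unsigned long offset)
{
	struct extent_io_tree *tree;

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	extent_invalidatepage(tree, page, offset);
}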
Chris Masond1310b22008-01-24 16:13:08 -05002723
2724/*
2725 * simple commit_write call, set_page_dirty is used to mark the page
2726 * dirty, and i_size is updated if the write extends the file
2727 */
2728int extent_commit_write(struct extent_io_tree *tree,
2729 struct inode *inode, struct page *page,
2730 unsigned from, unsigned to)
2731{
2732 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2733
2734 set_page_extent_mapped(page);
2735 set_page_dirty(page);
2736
2737 if (pos > inode->i_size) {
2738 i_size_write(inode, pos);
2739 mark_inode_dirty(inode);
2740 }
2741 return 0;
2742}
Chris Masond1310b22008-01-24 16:13:08 -05002743
2744int extent_prepare_write(struct extent_io_tree *tree,
2745 struct inode *inode, struct page *page,
2746 unsigned from, unsigned to, get_extent_t *get_extent)
2747{
2748 u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
2749 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
2750 u64 block_start;
2751 u64 orig_block_start;
2752 u64 block_end;
2753 u64 cur_end;
2754 struct extent_map *em;
2755 unsigned blocksize = 1 << inode->i_blkbits;
2756 size_t page_offset = 0;
2757 size_t block_off_start;
2758 size_t block_off_end;
2759 int err = 0;
2760 int iocount = 0;
2761 int ret = 0;
2762 int isnew;
2763
2764 set_page_extent_mapped(page);
2765
2766 block_start = (page_start + from) & ~((u64)blocksize - 1);
2767 block_end = (page_start + to - 1) | (blocksize - 1);
2768 orig_block_start = block_start;
2769
2770 lock_extent(tree, page_start, page_end, GFP_NOFS);
Chris Masond3977122009-01-05 21:25:51 -05002771 while (block_start <= block_end) {
Chris Masond1310b22008-01-24 16:13:08 -05002772 em = get_extent(inode, page, page_offset, block_start,
2773 block_end - block_start + 1, 1);
Chris Masond3977122009-01-05 21:25:51 -05002774 if (IS_ERR(em) || !em) {
 err = IS_ERR(em) ? PTR_ERR(em) : -EIO;
Chris Masond1310b22008-01-24 16:13:08 -05002775 goto err;
 }
Chris Masond3977122009-01-05 21:25:51 -05002776
Chris Masond1310b22008-01-24 16:13:08 -05002777 cur_end = min(block_end, extent_map_end(em) - 1);
2778 block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
2779 block_off_end = block_off_start + blocksize;
2780 isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
2781
2782 if (!PageUptodate(page) && isnew &&
2783 (block_off_end > to || block_off_start < from)) {
2784 void *kaddr;
2785
2786 kaddr = kmap_atomic(page, KM_USER0);
2787 if (block_off_end > to)
2788 memset(kaddr + to, 0, block_off_end - to);
2789 if (block_off_start < from)
2790 memset(kaddr + block_off_start, 0,
2791 from - block_off_start);
2792 flush_dcache_page(page);
2793 kunmap_atomic(kaddr, KM_USER0);
2794 }
2795 if ((em->block_start != EXTENT_MAP_HOLE &&
2796 em->block_start != EXTENT_MAP_INLINE) &&
2797 !isnew && !PageUptodate(page) &&
2798 (block_off_end > to || block_off_start < from) &&
2799 !test_range_bit(tree, block_start, cur_end,
Chris Mason9655d292009-09-02 15:22:30 -04002800 EXTENT_UPTODATE, 1, NULL)) {
Chris Masond1310b22008-01-24 16:13:08 -05002801 u64 sector;
2802 u64 extent_offset = block_start - em->start;
2803 size_t iosize;
2804 sector = (em->block_start + extent_offset) >> 9;
2805 iosize = (cur_end - block_start + blocksize) &
2806 ~((u64)blocksize - 1);
2807 /*
2808 * we've already got the extent locked, but we
2809 * need to split the state such that our end_bio
2810 * handler can clear the lock.
2811 */
2812 set_extent_bit(tree, block_start,
2813 block_start + iosize - 1,
Chris Mason2c64c532009-09-02 15:04:12 -04002814 EXTENT_LOCKED, 0, NULL, NULL, GFP_NOFS);
Chris Masond1310b22008-01-24 16:13:08 -05002815 ret = submit_extent_page(READ, tree, page,
2816 sector, iosize, page_offset, em->bdev,
2817 NULL, 1,
Chris Masonc8b97812008-10-29 14:49:59 -04002818 end_bio_extent_preparewrite, 0,
2819 0, 0);
Chris Masond1310b22008-01-24 16:13:08 -05002820 iocount++;
2821 block_start = block_start + iosize;
2822 } else {
2823 set_extent_uptodate(tree, block_start, cur_end,
2824 GFP_NOFS);
2825 unlock_extent(tree, block_start, cur_end, GFP_NOFS);
2826 block_start = cur_end + 1;
2827 }
2828 page_offset = block_start & (PAGE_CACHE_SIZE - 1);
2829 free_extent_map(em);
2830 }
2831 if (iocount) {
2832 wait_extent_bit(tree, orig_block_start,
2833 block_end, EXTENT_LOCKED);
2834 }
2835 check_page_uptodate(tree, page);
2836err:
2837 /* FIXME, zero out newly allocated blocks on error */
2838 return err;
2839}
Chris Masond1310b22008-01-24 16:13:08 -05002840
2841/*
Chris Mason7b13b7b2008-04-18 10:29:50 -04002842 * a helper for releasepage, this tests for areas of the page that
2843 * are locked or under IO and drops the related state bits if it is safe
2844 * to drop the page.
2845 */
2846int try_release_extent_state(struct extent_map_tree *map,
2847 struct extent_io_tree *tree, struct page *page,
2848 gfp_t mask)
2849{
2850 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2851 u64 end = start + PAGE_CACHE_SIZE - 1;
2852 int ret = 1;
2853
Chris Mason211f90e2008-07-18 11:56:15 -04002854 if (test_range_bit(tree, start, end,
Chris Mason8b62b722009-09-02 16:53:46 -04002855 EXTENT_IOBITS, 0, NULL))
Chris Mason7b13b7b2008-04-18 10:29:50 -04002856 ret = 0;
2857 else {
2858 if ((mask & GFP_NOFS) == GFP_NOFS)
2859 mask = GFP_NOFS;
Chris Mason11ef1602009-09-23 20:28:46 -04002860 /*
2861 * at this point we can safely clear everything except the
2862 * locked bit and the nodatasum bit
2863 */
2864 clear_extent_bit(tree, start, end,
2865 ~(EXTENT_LOCKED | EXTENT_NODATASUM),
2866 0, 0, NULL, mask);
Chris Mason7b13b7b2008-04-18 10:29:50 -04002867 }
2868 return ret;
2869}
Chris Mason7b13b7b2008-04-18 10:29:50 -04002870
2871/*
Chris Masond1310b22008-01-24 16:13:08 -05002872 * a helper for releasepage. As long as there are no locked extents
2873 * in the range corresponding to the page, both state records and extent
2874 * map records are removed
2875 */
2876int try_release_extent_mapping(struct extent_map_tree *map,
Chris Mason70dec802008-01-29 09:59:12 -05002877 struct extent_io_tree *tree, struct page *page,
2878 gfp_t mask)
Chris Masond1310b22008-01-24 16:13:08 -05002879{
2880 struct extent_map *em;
2881 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2882 u64 end = start + PAGE_CACHE_SIZE - 1;
Chris Mason7b13b7b2008-04-18 10:29:50 -04002883
Chris Mason70dec802008-01-29 09:59:12 -05002884 if ((mask & __GFP_WAIT) &&
2885 page->mapping->host->i_size > 16 * 1024 * 1024) {
Yan39b56372008-02-15 10:40:50 -05002886 u64 len;
Chris Mason70dec802008-01-29 09:59:12 -05002887 while (start <= end) {
Yan39b56372008-02-15 10:40:50 -05002888 len = end - start + 1;
Chris Mason890871b2009-09-02 16:24:52 -04002889 write_lock(&map->lock);
Yan39b56372008-02-15 10:40:50 -05002890 em = lookup_extent_mapping(map, start, len);
Chris Mason70dec802008-01-29 09:59:12 -05002891 if (!em || IS_ERR(em)) {
Chris Mason890871b2009-09-02 16:24:52 -04002892 write_unlock(&map->lock);
Chris Mason70dec802008-01-29 09:59:12 -05002893 break;
2894 }
Chris Mason7f3c74f2008-07-18 12:01:11 -04002895 if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
2896 em->start != start) {
Chris Mason890871b2009-09-02 16:24:52 -04002897 write_unlock(&map->lock);
Chris Mason70dec802008-01-29 09:59:12 -05002898 free_extent_map(em);
2899 break;
2900 }
2901 if (!test_range_bit(tree, em->start,
2902 extent_map_end(em) - 1,
Chris Mason8b62b722009-09-02 16:53:46 -04002903 EXTENT_LOCKED | EXTENT_WRITEBACK,
Chris Mason9655d292009-09-02 15:22:30 -04002904 0, NULL)) {
Chris Mason70dec802008-01-29 09:59:12 -05002905 remove_extent_mapping(map, em);
2906 /* once for the rb tree */
2907 free_extent_map(em);
2908 }
2909 start = extent_map_end(em);
Chris Mason890871b2009-09-02 16:24:52 -04002910 write_unlock(&map->lock);
Chris Mason70dec802008-01-29 09:59:12 -05002911
2912 /* once for us */
Chris Masond1310b22008-01-24 16:13:08 -05002913 free_extent_map(em);
2914 }
Chris Masond1310b22008-01-24 16:13:08 -05002915 }
Chris Mason7b13b7b2008-04-18 10:29:50 -04002916 return try_release_extent_state(map, tree, page, mask);
Chris Masond1310b22008-01-24 16:13:08 -05002917}
Chris Masond1310b22008-01-24 16:13:08 -05002918
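/*
 * bmap helper: map a file-relative block number to a disk sector.
 * Holes, inline and delalloc extents and failed lookups all come back
 * as sector 0, which is all the bmap interface can express.
 */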
2919sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
2920 get_extent_t *get_extent)
2921{
2922 struct inode *inode = mapping->host;
Josef Bacik2ac55d42010-02-03 19:33:23 +00002923 struct extent_state *cached_state = NULL;
Chris Masond1310b22008-01-24 16:13:08 -05002924 u64 start = iblock << inode->i_blkbits;
2925 sector_t sector = 0;
Yan Zhengd899e052008-10-30 14:25:28 -04002926 size_t blksize = (1 << inode->i_blkbits);
Chris Masond1310b22008-01-24 16:13:08 -05002927 struct extent_map *em;
2928
Josef Bacik2ac55d42010-02-03 19:33:23 +00002929 lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + blksize - 1,
2930 0, &cached_state, GFP_NOFS);
Yan Zhengd899e052008-10-30 14:25:28 -04002931 em = get_extent(inode, NULL, 0, start, blksize, 0);
Josef Bacik2ac55d42010-02-03 19:33:23 +00002932 unlock_extent_cached(&BTRFS_I(inode)->io_tree, start,
2933 start + blksize - 1, &cached_state, GFP_NOFS);
Chris Masond1310b22008-01-24 16:13:08 -05002934 if (!em || IS_ERR(em))
2935 return 0;
2936
Yan Zhengd899e052008-10-30 14:25:28 -04002937 if (em->block_start > EXTENT_MAP_LAST_BYTE)
Chris Masond1310b22008-01-24 16:13:08 -05002938 goto out;
2939
2940 sector = (em->block_start + start - em->start) >> inode->i_blkbits;
Chris Masond1310b22008-01-24 16:13:08 -05002941out:
2942 free_extent_map(em);
2943 return sector;
2944}
2945
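/*
 * fiemap helper: walk the extent maps covering [start, start + len)
 * and hand each one to fiemap_fill_next_extent, translating the
 * special block_start values (hole, inline, delalloc) and the
 * compression flag into FIEMAP_EXTENT_* bits.
 */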
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05002946int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
2947 __u64 start, __u64 len, get_extent_t *get_extent)
2948{
2949 int ret;
2950 u64 off = start;
2951 u64 max = start + len;
2952 u32 flags = 0;
2953 u64 disko = 0;
2954 struct extent_map *em = NULL;
Josef Bacik2ac55d42010-02-03 19:33:23 +00002955 struct extent_state *cached_state = NULL;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05002956 int end = 0;
2957 u64 em_start = 0, em_len = 0;
2958 unsigned long emflags;
2959 ret = 0;
2960
2961 if (len == 0)
2962 return -EINVAL;
2963
 /* io tree ranges are inclusive, so the end is start + len - 1 */
Josef Bacik2ac55d42010-02-03 19:33:23 +00002964 lock_extent_bits(&BTRFS_I(inode)->io_tree, start,
 2965 start + len - 1, 0, &cached_state, GFP_NOFS);
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05002966 em = get_extent(inode, NULL, 0, off, max - off, 0);
2967 if (!em)
2968 goto out;
2969 if (IS_ERR(em)) {
2970 ret = PTR_ERR(em);
2971 goto out;
2972 }
2973 while (!end) {
2974 off = em->start + em->len;
2975 if (off >= max)
2976 end = 1;
2977
2978 em_start = em->start;
2979 em_len = em->len;
2980
2981 disko = 0;
2982 flags = 0;
2983
Heiko Carstens93dbfad2009-04-03 10:33:45 -04002984 if (em->block_start == EXTENT_MAP_LAST_BYTE) {
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05002985 end = 1;
2986 flags |= FIEMAP_EXTENT_LAST;
Heiko Carstens93dbfad2009-04-03 10:33:45 -04002987 } else if (em->block_start == EXTENT_MAP_HOLE) {
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05002988 flags |= FIEMAP_EXTENT_UNWRITTEN;
Heiko Carstens93dbfad2009-04-03 10:33:45 -04002989 } else if (em->block_start == EXTENT_MAP_INLINE) {
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05002990 flags |= (FIEMAP_EXTENT_DATA_INLINE |
2991 FIEMAP_EXTENT_NOT_ALIGNED);
Heiko Carstens93dbfad2009-04-03 10:33:45 -04002992 } else if (em->block_start == EXTENT_MAP_DELALLOC) {
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05002993 flags |= (FIEMAP_EXTENT_DELALLOC |
2994 FIEMAP_EXTENT_UNKNOWN);
Heiko Carstens93dbfad2009-04-03 10:33:45 -04002995 } else {
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05002996 disko = em->block_start;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05002997 }
2998 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
2999 flags |= FIEMAP_EXTENT_ENCODED;
3000
3001 emflags = em->flags;
3002 free_extent_map(em);
3003 em = NULL;
3004
3005 if (!end) {
3006 em = get_extent(inode, NULL, 0, off, max - off, 0);
3007 if (!em)
3008 goto out;
3009 if (IS_ERR(em)) {
3010 ret = PTR_ERR(em);
3011 goto out;
3012 }
3013 emflags = em->flags;
3014 }
3015 if (test_bit(EXTENT_FLAG_VACANCY, &emflags)) {
3016 flags |= FIEMAP_EXTENT_LAST;
3017 end = 1;
3018 }
3019
3020 ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
3021 em_len, flags);
3022 if (ret)
3023 goto out_free;
3024 }
3025out_free:
3026 free_extent_map(em);
3027out:
Josef Bacik2ac55d42010-02-03 19:33:23 +00003028 unlock_extent_cached(&BTRFS_I(inode)->io_tree, start,
 3029 start + len - 1, &cached_state, GFP_NOFS);
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003030 return ret;
3031}
3032
Chris Masond1310b22008-01-24 16:13:08 -05003033static inline struct page *extent_buffer_page(struct extent_buffer *eb,
3034 unsigned long i)
3035{
3036 struct page *p;
3037 struct address_space *mapping;
3038
3039 if (i == 0)
3040 return eb->first_page;
3041 i += eb->start >> PAGE_CACHE_SHIFT;
3042 mapping = eb->first_page->mapping;
Chris Mason33958dc2008-07-30 10:29:12 -04003043 if (!mapping)
3044 return NULL;
Sven Wegener0ee0fda2008-07-30 16:54:26 -04003045
3046 /*
3047 * extent_buffer_page is only called after pinning the page
3048 * by increasing the reference count. So we know the page must
3049 * be in the radix tree.
3050 */
Sven Wegener0ee0fda2008-07-30 16:54:26 -04003051 rcu_read_lock();
Chris Masond1310b22008-01-24 16:13:08 -05003052 p = radix_tree_lookup(&mapping->page_tree, i);
Sven Wegener0ee0fda2008-07-30 16:54:26 -04003053 rcu_read_unlock();
Chris Mason2b1f55b2008-09-24 11:48:04 -04003054
Chris Masond1310b22008-01-24 16:13:08 -05003055 return p;
3056}
3057
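/*
 * number of pages a buffer covers.  Buffers need not begin on a page
 * boundary, so with 4K pages, for example, a 4K buffer that starts 2K
 * into a page spans two pages.
 */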
Chris Mason6af118c2008-07-22 11:18:07 -04003058static inline unsigned long num_extent_pages(u64 start, u64 len)
Chris Masonce9adaa2008-04-09 16:28:12 -04003059{
Chris Mason6af118c2008-07-22 11:18:07 -04003060 return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
3061 (start >> PAGE_CACHE_SHIFT);
Chris Mason728131d2008-04-09 16:28:12 -04003062}
3063
Chris Masond1310b22008-01-24 16:13:08 -05003064static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
3065 u64 start,
3066 unsigned long len,
3067 gfp_t mask)
3068{
3069 struct extent_buffer *eb = NULL;
Chris Mason39351272009-02-04 09:24:05 -05003070#if LEAK_DEBUG
Chris Mason2d2ae542008-03-26 16:24:23 -04003071 unsigned long flags;
Chris Mason4bef0842008-09-08 11:18:08 -04003072#endif
Chris Masond1310b22008-01-24 16:13:08 -05003073
Chris Masond1310b22008-01-24 16:13:08 -05003074 eb = kmem_cache_zalloc(extent_buffer_cache, mask);
 if (!eb)
 return NULL;
Chris Masond1310b22008-01-24 16:13:08 -05003075 eb->start = start;
3076 eb->len = len;
Chris Masonb4ce94d2009-02-04 09:25:08 -05003077 spin_lock_init(&eb->lock);
3078 init_waitqueue_head(&eb->lock_wq);
3079
Chris Mason39351272009-02-04 09:24:05 -05003080#if LEAK_DEBUG
Chris Mason2d2ae542008-03-26 16:24:23 -04003081 spin_lock_irqsave(&leak_lock, flags);
3082 list_add(&eb->leak_list, &buffers);
3083 spin_unlock_irqrestore(&leak_lock, flags);
Chris Mason4bef0842008-09-08 11:18:08 -04003084#endif
Chris Masond1310b22008-01-24 16:13:08 -05003085 atomic_set(&eb->refs, 1);
3086
3087 return eb;
3088}
3089
3090static void __free_extent_buffer(struct extent_buffer *eb)
3091{
Chris Mason39351272009-02-04 09:24:05 -05003092#if LEAK_DEBUG
Chris Mason2d2ae542008-03-26 16:24:23 -04003093 unsigned long flags;
3094 spin_lock_irqsave(&leak_lock, flags);
3095 list_del(&eb->leak_list);
3096 spin_unlock_irqrestore(&leak_lock, flags);
Chris Mason4bef0842008-09-08 11:18:08 -04003097#endif
Chris Masond1310b22008-01-24 16:13:08 -05003098 kmem_cache_free(extent_buffer_cache, eb);
3099}
3100
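/*
 * find or create the extent buffer for [start, start + len).  A hit
 * in the buffer tree just gains a reference; otherwise the backing
 * pages are found or created in the page cache and a new buffer is
 * inserted.  If another thread won the insertion race, the loser
 * drops its copy in free_eb and returns the winner instead.
 */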
3101struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
3102 u64 start, unsigned long len,
3103 struct page *page0,
3104 gfp_t mask)
3105{
3106 unsigned long num_pages = num_extent_pages(start, len);
3107 unsigned long i;
3108 unsigned long index = start >> PAGE_CACHE_SHIFT;
3109 struct extent_buffer *eb;
Chris Mason6af118c2008-07-22 11:18:07 -04003110 struct extent_buffer *exists = NULL;
Chris Masond1310b22008-01-24 16:13:08 -05003111 struct page *p;
3112 struct address_space *mapping = tree->mapping;
3113 int uptodate = 1;
3114
Chris Mason6af118c2008-07-22 11:18:07 -04003115 spin_lock(&tree->buffer_lock);
3116 eb = buffer_search(tree, start);
3117 if (eb) {
3118 atomic_inc(&eb->refs);
3119 spin_unlock(&tree->buffer_lock);
Josef Bacik0f9dd462008-09-23 13:14:11 -04003120 mark_page_accessed(eb->first_page);
Chris Mason6af118c2008-07-22 11:18:07 -04003121 return eb;
3122 }
3123 spin_unlock(&tree->buffer_lock);
3124
Chris Masond1310b22008-01-24 16:13:08 -05003125 eb = __alloc_extent_buffer(tree, start, len, mask);
Peter2b114d12008-04-01 11:21:40 -04003126 if (!eb)
Chris Masond1310b22008-01-24 16:13:08 -05003127 return NULL;
3128
Chris Masond1310b22008-01-24 16:13:08 -05003129 if (page0) {
3130 eb->first_page = page0;
3131 i = 1;
3132 index++;
3133 page_cache_get(page0);
3134 mark_page_accessed(page0);
3135 set_page_extent_mapped(page0);
Chris Masond1310b22008-01-24 16:13:08 -05003136 set_page_extent_head(page0, len);
Chris Masonf1885912008-04-09 16:28:12 -04003137 uptodate = PageUptodate(page0);
Chris Masond1310b22008-01-24 16:13:08 -05003138 } else {
3139 i = 0;
3140 }
3141 for (; i < num_pages; i++, index++) {
3142 p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
3143 if (!p) {
3144 WARN_ON(1);
Chris Mason6af118c2008-07-22 11:18:07 -04003145 goto free_eb;
Chris Masond1310b22008-01-24 16:13:08 -05003146 }
3147 set_page_extent_mapped(p);
3148 mark_page_accessed(p);
3149 if (i == 0) {
3150 eb->first_page = p;
3151 set_page_extent_head(p, len);
3152 } else {
3153 set_page_private(p, EXTENT_PAGE_PRIVATE);
3154 }
3155 if (!PageUptodate(p))
3156 uptodate = 0;
3157 unlock_page(p);
3158 }
3159 if (uptodate)
Chris Masonb4ce94d2009-02-04 09:25:08 -05003160 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
Chris Masond1310b22008-01-24 16:13:08 -05003161
Chris Mason6af118c2008-07-22 11:18:07 -04003162 spin_lock(&tree->buffer_lock);
3163 exists = buffer_tree_insert(tree, start, &eb->rb_node);
3164 if (exists) {
3165 /* add one reference for the caller */
3166 atomic_inc(&exists->refs);
3167 spin_unlock(&tree->buffer_lock);
3168 goto free_eb;
3169 }
Chris Mason6af118c2008-07-22 11:18:07 -04003170 /* add one reference for the tree */
3171 atomic_inc(&eb->refs);
Yan, Zhengf044ba72010-02-04 08:46:56 +00003172 spin_unlock(&tree->buffer_lock);
Chris Masond1310b22008-01-24 16:13:08 -05003173 return eb;
3174
Chris Mason6af118c2008-07-22 11:18:07 -04003175free_eb:
Chris Masond1310b22008-01-24 16:13:08 -05003176 if (!atomic_dec_and_test(&eb->refs))
Chris Mason6af118c2008-07-22 11:18:07 -04003177 return exists;
3178 for (index = 1; index < i; index++)
Chris Masond1310b22008-01-24 16:13:08 -05003179 page_cache_release(extent_buffer_page(eb, index));
Chris Mason6af118c2008-07-22 11:18:07 -04003180 page_cache_release(extent_buffer_page(eb, 0));
Chris Masond1310b22008-01-24 16:13:08 -05003181 __free_extent_buffer(eb);
Chris Mason6af118c2008-07-22 11:18:07 -04003182 return exists;
Chris Masond1310b22008-01-24 16:13:08 -05003183}
Chris Masond1310b22008-01-24 16:13:08 -05003184
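/*
 * look up an existing extent buffer without ever creating one.  The
 * returned buffer carries an extra reference, and its first page is
 * marked accessed to keep it warm on the LRU.
 */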
3185struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
3186 u64 start, unsigned long len,
3187 gfp_t mask)
3188{
Chris Masond1310b22008-01-24 16:13:08 -05003189 struct extent_buffer *eb;
Chris Masond1310b22008-01-24 16:13:08 -05003190
Chris Mason6af118c2008-07-22 11:18:07 -04003191 spin_lock(&tree->buffer_lock);
3192 eb = buffer_search(tree, start);
3193 if (eb)
3194 atomic_inc(&eb->refs);
3195 spin_unlock(&tree->buffer_lock);
Chris Masond1310b22008-01-24 16:13:08 -05003196
Josef Bacik0f9dd462008-09-23 13:14:11 -04003197 if (eb)
3198 mark_page_accessed(eb->first_page);
3199
Chris Masond1310b22008-01-24 16:13:08 -05003200 return eb;
Chris Masond1310b22008-01-24 16:13:08 -05003201}
Chris Masond1310b22008-01-24 16:13:08 -05003202
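/*
 * drop one reference on a buffer.  The final reference is expected to
 * be dropped by try_release_extent_buffer, which also unhooks the
 * buffer from the tree, so reaching zero here trips the WARN_ON.
 */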
3203void free_extent_buffer(struct extent_buffer *eb)
3204{
Chris Masond1310b22008-01-24 16:13:08 -05003205 if (!eb)
3206 return;
3207
3208 if (!atomic_dec_and_test(&eb->refs))
3209 return;
3210
Chris Mason6af118c2008-07-22 11:18:07 -04003211 WARN_ON(1);
Chris Masond1310b22008-01-24 16:13:08 -05003212}
Chris Masond1310b22008-01-24 16:13:08 -05003213
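/*
 * clear the dirty state of every page in the buffer, including the
 * PAGECACHE_TAG_DIRTY tags in the radix tree, so writeback will
 * skip them.
 */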
3214int clear_extent_buffer_dirty(struct extent_io_tree *tree,
3215 struct extent_buffer *eb)
3216{
Chris Masond1310b22008-01-24 16:13:08 -05003217 unsigned long i;
3218 unsigned long num_pages;
3219 struct page *page;
3220
Chris Masond1310b22008-01-24 16:13:08 -05003221 num_pages = num_extent_pages(eb->start, eb->len);
3222
3223 for (i = 0; i < num_pages; i++) {
3224 page = extent_buffer_page(eb, i);
Chris Masonb9473432009-03-13 11:00:37 -04003225 if (!PageDirty(page))
Chris Masond2c3f4f2008-11-19 12:44:22 -05003226 continue;
3227
Chris Masona61e6f22008-07-22 11:18:08 -04003228 lock_page(page);
Chris Masond1310b22008-01-24 16:13:08 -05003229 if (i == 0)
3230 set_page_extent_head(page, eb->len);
3231 else
3232 set_page_private(page, EXTENT_PAGE_PRIVATE);
3233
Chris Masond1310b22008-01-24 16:13:08 -05003234 clear_page_dirty_for_io(page);
Sven Wegener0ee0fda2008-07-30 16:54:26 -04003235 spin_lock_irq(&page->mapping->tree_lock);
Chris Masond1310b22008-01-24 16:13:08 -05003236 if (!PageDirty(page)) {
3237 radix_tree_tag_clear(&page->mapping->page_tree,
3238 page_index(page),
3239 PAGECACHE_TAG_DIRTY);
3240 }
Sven Wegener0ee0fda2008-07-30 16:54:26 -04003241 spin_unlock_irq(&page->mapping->tree_lock);
Chris Masona61e6f22008-07-22 11:18:08 -04003242 unlock_page(page);
Chris Masond1310b22008-01-24 16:13:08 -05003243 }
3244 return 0;
3245}
Chris Masond1310b22008-01-24 16:13:08 -05003246
3247int wait_on_extent_buffer_writeback(struct extent_io_tree *tree,
3248 struct extent_buffer *eb)
3249{
3250 return wait_on_extent_writeback(tree, eb->start,
3251 eb->start + eb->len - 1);
3252}
Chris Masond1310b22008-01-24 16:13:08 -05003253
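/*
 * dirty every page of the buffer.  Returns the buffer's previous
 * dirty state, so the caller can tell whether it was the first to
 * dirty it.
 */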
3254int set_extent_buffer_dirty(struct extent_io_tree *tree,
3255 struct extent_buffer *eb)
3256{
3257 unsigned long i;
3258 unsigned long num_pages;
Chris Masonb9473432009-03-13 11:00:37 -04003259 int was_dirty = 0;
Chris Masond1310b22008-01-24 16:13:08 -05003260
Chris Masonb9473432009-03-13 11:00:37 -04003261 was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
Chris Masond1310b22008-01-24 16:13:08 -05003262 num_pages = num_extent_pages(eb->start, eb->len);
Chris Masonb9473432009-03-13 11:00:37 -04003263 for (i = 0; i < num_pages; i++)
Chris Masond1310b22008-01-24 16:13:08 -05003264 __set_page_dirty_nobuffers(extent_buffer_page(eb, i));
Chris Masonb9473432009-03-13 11:00:37 -04003265 return was_dirty;
Chris Masond1310b22008-01-24 16:13:08 -05003266}
Chris Masond1310b22008-01-24 16:13:08 -05003267
Chris Mason1259ab72008-05-12 13:39:03 -04003268int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
Josef Bacik2ac55d42010-02-03 19:33:23 +00003269 struct extent_buffer *eb,
3270 struct extent_state **cached_state)
Chris Mason1259ab72008-05-12 13:39:03 -04003271{
3272 unsigned long i;
3273 struct page *page;
3274 unsigned long num_pages;
3275
3276 num_pages = num_extent_pages(eb->start, eb->len);
Chris Masonb4ce94d2009-02-04 09:25:08 -05003277 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
Chris Mason1259ab72008-05-12 13:39:03 -04003278
3279 clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
Josef Bacik2ac55d42010-02-03 19:33:23 +00003280 cached_state, GFP_NOFS);
Chris Mason1259ab72008-05-12 13:39:03 -04003281 for (i = 0; i < num_pages; i++) {
3282 page = extent_buffer_page(eb, i);
Chris Mason33958dc2008-07-30 10:29:12 -04003283 if (page)
3284 ClearPageUptodate(page);
Chris Mason1259ab72008-05-12 13:39:03 -04003285 }
3286 return 0;
3287}
3288
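/*
 * mark a whole buffer up to date: the EXTENT_UPTODATE bits are set in
 * the io tree and fully covered pages get SetPageUptodate.  Pages the
 * buffer only partially covers are left to check_page_uptodate, which
 * flips them once the entire page is covered.
 */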
Chris Masond1310b22008-01-24 16:13:08 -05003289int set_extent_buffer_uptodate(struct extent_io_tree *tree,
3290 struct extent_buffer *eb)
3291{
3292 unsigned long i;
3293 struct page *page;
3294 unsigned long num_pages;
3295
3296 num_pages = num_extent_pages(eb->start, eb->len);
3297
3298 set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3299 GFP_NOFS);
3300 for (i = 0; i < num_pages; i++) {
3301 page = extent_buffer_page(eb, i);
3302 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
3303 ((i == num_pages - 1) &&
3304 ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
3305 check_page_uptodate(tree, page);
3306 continue;
3307 }
3308 SetPageUptodate(page);
3309 }
3310 return 0;
3311}
Chris Masond1310b22008-01-24 16:13:08 -05003312
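/*
 * return 1 when every byte of [start, end] is up to date, trusting
 * the EXTENT_UPTODATE bits when they are set and falling back to the
 * per-page uptodate flags otherwise.
 */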
Chris Masonce9adaa2008-04-09 16:28:12 -04003313int extent_range_uptodate(struct extent_io_tree *tree,
3314 u64 start, u64 end)
3315{
3316 struct page *page;
3317 int ret;
3318 int pg_uptodate = 1;
3319 int uptodate;
3320 unsigned long index;
3321
Chris Mason9655d292009-09-02 15:22:30 -04003322 ret = test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL);
Chris Masonce9adaa2008-04-09 16:28:12 -04003323 if (ret)
3324 return 1;
Chris Masond3977122009-01-05 21:25:51 -05003325 while (start <= end) {
Chris Masonce9adaa2008-04-09 16:28:12 -04003326 index = start >> PAGE_CACHE_SHIFT;
 3327 page = find_get_page(tree->mapping, index);
 /* the page may not be in the cache at all */
 3328 uptodate = page && PageUptodate(page);
 3329 if (page)
 page_cache_release(page);
3330 if (!uptodate) {
3331 pg_uptodate = 0;
3332 break;
3333 }
3334 start += PAGE_CACHE_SIZE;
3335 }
3336 return pg_uptodate;
3337}
3338
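/*
 * nonzero when the whole buffer is up to date: the cheap buffer flag
 * is checked first, then the io tree bits, then each page.
 */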
Chris Masond1310b22008-01-24 16:13:08 -05003339int extent_buffer_uptodate(struct extent_io_tree *tree,
Josef Bacik2ac55d42010-02-03 19:33:23 +00003340 struct extent_buffer *eb,
3341 struct extent_state *cached_state)
Chris Masond1310b22008-01-24 16:13:08 -05003342{
Chris Mason728131d2008-04-09 16:28:12 -04003343 int ret = 0;
Chris Masonce9adaa2008-04-09 16:28:12 -04003344 unsigned long num_pages;
3345 unsigned long i;
Chris Mason728131d2008-04-09 16:28:12 -04003346 struct page *page;
3347 int pg_uptodate = 1;
3348
Chris Masonb4ce94d2009-02-04 09:25:08 -05003349 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
Chris Mason42352982008-04-28 16:40:52 -04003350 return 1;
Chris Mason728131d2008-04-09 16:28:12 -04003351
Chris Mason42352982008-04-28 16:40:52 -04003352 ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
Josef Bacik2ac55d42010-02-03 19:33:23 +00003353 EXTENT_UPTODATE, 1, cached_state);
Chris Mason42352982008-04-28 16:40:52 -04003354 if (ret)
3355 return ret;
Chris Mason728131d2008-04-09 16:28:12 -04003356
3357 num_pages = num_extent_pages(eb->start, eb->len);
3358 for (i = 0; i < num_pages; i++) {
3359 page = extent_buffer_page(eb, i);
3360 if (!PageUptodate(page)) {
3361 pg_uptodate = 0;
3362 break;
3363 }
3364 }
Chris Mason42352982008-04-28 16:40:52 -04003365 return pg_uptodate;
Chris Masond1310b22008-01-24 16:13:08 -05003366}
Chris Masond1310b22008-01-24 16:13:08 -05003367
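/*
 * read the pages of a buffer in from disk.  Pages that are already up
 * to date are skipped and the rest go down in as few bios as
 * possible.  With wait set this blocks until every page unlocks and
 * returns -EIO if any of them failed; without it, contended pages
 * just abort the read.
 */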
3368int read_extent_buffer_pages(struct extent_io_tree *tree,
3369 struct extent_buffer *eb,
Chris Masona86c12c2008-02-07 10:50:54 -05003370 u64 start, int wait,
Chris Masonf1885912008-04-09 16:28:12 -04003371 get_extent_t *get_extent, int mirror_num)
Chris Masond1310b22008-01-24 16:13:08 -05003372{
3373 unsigned long i;
3374 unsigned long start_i;
3375 struct page *page;
3376 int err;
3377 int ret = 0;
Chris Masonce9adaa2008-04-09 16:28:12 -04003378 int locked_pages = 0;
3379 int all_uptodate = 1;
3380 int inc_all_pages = 0;
Chris Masond1310b22008-01-24 16:13:08 -05003381 unsigned long num_pages;
Chris Masona86c12c2008-02-07 10:50:54 -05003382 struct bio *bio = NULL;
Chris Masonc8b97812008-10-29 14:49:59 -04003383 unsigned long bio_flags = 0;
Chris Masona86c12c2008-02-07 10:50:54 -05003384
Chris Masonb4ce94d2009-02-04 09:25:08 -05003385 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
Chris Masond1310b22008-01-24 16:13:08 -05003386 return 0;
3387
Chris Masonce9adaa2008-04-09 16:28:12 -04003388 if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
Chris Mason9655d292009-09-02 15:22:30 -04003389 EXTENT_UPTODATE, 1, NULL)) {
Chris Masond1310b22008-01-24 16:13:08 -05003390 return 0;
3391 }
3392
3393 if (start) {
3394 WARN_ON(start < eb->start);
3395 start_i = (start >> PAGE_CACHE_SHIFT) -
3396 (eb->start >> PAGE_CACHE_SHIFT);
3397 } else {
3398 start_i = 0;
3399 }
3400
3401 num_pages = num_extent_pages(eb->start, eb->len);
3402 for (i = start_i; i < num_pages; i++) {
3403 page = extent_buffer_page(eb, i);
Chris Masond1310b22008-01-24 16:13:08 -05003404 if (!wait) {
David Woodhouse2db04962008-08-07 11:19:43 -04003405 if (!trylock_page(page))
Chris Masonce9adaa2008-04-09 16:28:12 -04003406 goto unlock_exit;
Chris Masond1310b22008-01-24 16:13:08 -05003407 } else {
3408 lock_page(page);
3409 }
Chris Masonce9adaa2008-04-09 16:28:12 -04003410 locked_pages++;
Chris Masond3977122009-01-05 21:25:51 -05003411 if (!PageUptodate(page))
Chris Masonce9adaa2008-04-09 16:28:12 -04003412 all_uptodate = 0;
Chris Masonce9adaa2008-04-09 16:28:12 -04003413 }
3414 if (all_uptodate) {
3415 if (start_i == 0)
Chris Masonb4ce94d2009-02-04 09:25:08 -05003416 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
Chris Masonce9adaa2008-04-09 16:28:12 -04003417 goto unlock_exit;
3418 }
3419
3420 for (i = start_i; i < num_pages; i++) {
3421 page = extent_buffer_page(eb, i);
3422 if (inc_all_pages)
3423 page_cache_get(page);
3424 if (!PageUptodate(page)) {
3425 if (start_i == 0)
3426 inc_all_pages = 1;
Chris Masonf1885912008-04-09 16:28:12 -04003427 ClearPageError(page);
Chris Masona86c12c2008-02-07 10:50:54 -05003428 err = __extent_read_full_page(tree, page,
Chris Masonf1885912008-04-09 16:28:12 -04003429 get_extent, &bio,
Chris Masonc8b97812008-10-29 14:49:59 -04003430 mirror_num, &bio_flags);
Chris Masond3977122009-01-05 21:25:51 -05003431 if (err)
Chris Masond1310b22008-01-24 16:13:08 -05003432 ret = err;
Chris Masond1310b22008-01-24 16:13:08 -05003433 } else {
3434 unlock_page(page);
3435 }
3436 }
3437
Chris Masona86c12c2008-02-07 10:50:54 -05003438 if (bio)
Chris Masonc8b97812008-10-29 14:49:59 -04003439 submit_one_bio(READ, bio, mirror_num, bio_flags);
Chris Masona86c12c2008-02-07 10:50:54 -05003440
Chris Masond3977122009-01-05 21:25:51 -05003441 if (ret || !wait)
Chris Masond1310b22008-01-24 16:13:08 -05003442 return ret;
Chris Masond3977122009-01-05 21:25:51 -05003443
Chris Masond1310b22008-01-24 16:13:08 -05003444 for (i = start_i; i < num_pages; i++) {
3445 page = extent_buffer_page(eb, i);
3446 wait_on_page_locked(page);
Chris Masond3977122009-01-05 21:25:51 -05003447 if (!PageUptodate(page))
Chris Masond1310b22008-01-24 16:13:08 -05003448 ret = -EIO;
Chris Masond1310b22008-01-24 16:13:08 -05003449 }
Chris Masond3977122009-01-05 21:25:51 -05003450
Chris Masond1310b22008-01-24 16:13:08 -05003451 if (!ret)
Chris Masonb4ce94d2009-02-04 09:25:08 -05003452 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
Chris Masond1310b22008-01-24 16:13:08 -05003453 return ret;
Chris Masonce9adaa2008-04-09 16:28:12 -04003454
3455unlock_exit:
3456 i = start_i;
Chris Masond3977122009-01-05 21:25:51 -05003457 while (locked_pages > 0) {
Chris Masonce9adaa2008-04-09 16:28:12 -04003458 page = extent_buffer_page(eb, i);
3459 i++;
3460 unlock_page(page);
3461 locked_pages--;
3462 }
3463 return ret;
Chris Masond1310b22008-01-24 16:13:08 -05003464}
Chris Masond1310b22008-01-24 16:13:08 -05003465
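/*
 * copy len bytes of buffer contents, starting at byte offset start,
 * into dstv.  The walk handles ranges that span page boundaries.
 */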
3466void read_extent_buffer(struct extent_buffer *eb, void *dstv,
3467 unsigned long start,
3468 unsigned long len)
3469{
3470 size_t cur;
3471 size_t offset;
3472 struct page *page;
3473 char *kaddr;
3474 char *dst = (char *)dstv;
3475 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3476 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
Chris Masond1310b22008-01-24 16:13:08 -05003477
3478 WARN_ON(start > eb->len);
3479 WARN_ON(start + len > eb->start + eb->len);
3480
3481 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3482
Chris Masond3977122009-01-05 21:25:51 -05003483 while (len > 0) {
Chris Masond1310b22008-01-24 16:13:08 -05003484 page = extent_buffer_page(eb, i);
Chris Masond1310b22008-01-24 16:13:08 -05003485
3486 cur = min(len, (PAGE_CACHE_SIZE - offset));
3487 kaddr = kmap_atomic(page, KM_USER1);
3488 memcpy(dst, kaddr + offset, cur);
3489 kunmap_atomic(kaddr, KM_USER1);
3490
3491 dst += cur;
3492 len -= cur;
3493 offset = 0;
3494 i++;
3495 }
3496}
Chris Masond1310b22008-01-24 16:13:08 -05003497
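/*
 * map min_len bytes at start for direct access via kmap_atomic.  The
 * region must fit inside a single page or -EINVAL is returned; on
 * success *map points at the requested bytes and *map_start/*map_len
 * describe the part of the buffer the mapped page covers.
 */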
3498int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
3499 unsigned long min_len, char **token, char **map,
3500 unsigned long *map_start,
3501 unsigned long *map_len, int km)
3502{
3503 size_t offset = start & (PAGE_CACHE_SIZE - 1);
3504 char *kaddr;
3505 struct page *p;
3506 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3507 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3508 unsigned long end_i = (start_offset + start + min_len - 1) >>
3509 PAGE_CACHE_SHIFT;
3510
3511 if (i != end_i)
3512 return -EINVAL;
3513
3514 if (i == 0) {
3515 offset = start_offset;
3516 *map_start = 0;
3517 } else {
3518 offset = 0;
3519 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
3520 }
Chris Masond3977122009-01-05 21:25:51 -05003521
Chris Masond1310b22008-01-24 16:13:08 -05003522 if (start + min_len > eb->len) {
Chris Masond3977122009-01-05 21:25:51 -05003523 printk(KERN_ERR "btrfs bad mapping eb start %llu len %lu, "
3524 "wanted %lu %lu\n", (unsigned long long)eb->start,
3525 eb->len, start, min_len);
Chris Masond1310b22008-01-24 16:13:08 -05003526 WARN_ON(1);
 return -EINVAL;
 3527 }
3528
3529 p = extent_buffer_page(eb, i);
Chris Masond1310b22008-01-24 16:13:08 -05003530 kaddr = kmap_atomic(p, km);
3531 *token = kaddr;
3532 *map = kaddr + offset;
3533 *map_len = PAGE_CACHE_SIZE - offset;
3534 return 0;
3535}
Chris Masond1310b22008-01-24 16:13:08 -05003536
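/*
 * like map_private_extent_buffer, but works with the mapping cached
 * in the buffer itself: an existing cached mapping is dropped first
 * and the new one is cached in its place.
 */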
3537int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
3538 unsigned long min_len,
3539 char **token, char **map,
3540 unsigned long *map_start,
3541 unsigned long *map_len, int km)
3542{
3543 int err;
3544 int save = 0;
3545 if (eb->map_token) {
3546 unmap_extent_buffer(eb, eb->map_token, km);
3547 eb->map_token = NULL;
3548 save = 1;
3549 }
3550 err = map_private_extent_buffer(eb, start, min_len, token, map,
3551 map_start, map_len, km);
3552 if (!err && save) {
3553 eb->map_token = *token;
3554 eb->kaddr = *map;
3555 eb->map_start = *map_start;
3556 eb->map_len = *map_len;
3557 }
3558 return err;
3559}
Chris Masond1310b22008-01-24 16:13:08 -05003560
3561void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
3562{
3563 kunmap_atomic(token, km);
3564}
Chris Masond1310b22008-01-24 16:13:08 -05003565
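/*
 * memcmp_extent_buffer and the write/memset/copy helpers below all
 * walk the buffer the same way read_extent_buffer does: one page at
 * a time under kmap_atomic, so ranges may cross page boundaries
 * freely.
 */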
3566int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
3567 unsigned long start,
3568 unsigned long len)
3569{
3570 size_t cur;
3571 size_t offset;
3572 struct page *page;
3573 char *kaddr;
3574 char *ptr = (char *)ptrv;
3575 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3576 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3577 int ret = 0;
3578
3579 WARN_ON(start > eb->len);
3580 WARN_ON(start + len > eb->start + eb->len);
3581
3582 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3583
Chris Masond3977122009-01-05 21:25:51 -05003584 while (len > 0) {
Chris Masond1310b22008-01-24 16:13:08 -05003585 page = extent_buffer_page(eb, i);
Chris Masond1310b22008-01-24 16:13:08 -05003586
3587 cur = min(len, (PAGE_CACHE_SIZE - offset));
3588
3589 kaddr = kmap_atomic(page, KM_USER0);
3590 ret = memcmp(ptr, kaddr + offset, cur);
3591 kunmap_atomic(kaddr, KM_USER0);
3592 if (ret)
3593 break;
3594
3595 ptr += cur;
3596 len -= cur;
3597 offset = 0;
3598 i++;
3599 }
3600 return ret;
3601}
Chris Masond1310b22008-01-24 16:13:08 -05003602
3603void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
3604 unsigned long start, unsigned long len)
3605{
3606 size_t cur;
3607 size_t offset;
3608 struct page *page;
3609 char *kaddr;
3610 char *src = (char *)srcv;
3611 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3612 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3613
3614 WARN_ON(start > eb->len);
3615 WARN_ON(start + len > eb->start + eb->len);
3616
3617 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3618
Chris Masond3977122009-01-05 21:25:51 -05003619 while (len > 0) {
Chris Masond1310b22008-01-24 16:13:08 -05003620 page = extent_buffer_page(eb, i);
3621 WARN_ON(!PageUptodate(page));
3622
3623 cur = min(len, PAGE_CACHE_SIZE - offset);
3624 kaddr = kmap_atomic(page, KM_USER1);
3625 memcpy(kaddr + offset, src, cur);
3626 kunmap_atomic(kaddr, KM_USER1);
3627
3628 src += cur;
3629 len -= cur;
3630 offset = 0;
3631 i++;
3632 }
3633}
Chris Masond1310b22008-01-24 16:13:08 -05003634
3635void memset_extent_buffer(struct extent_buffer *eb, char c,
3636 unsigned long start, unsigned long len)
3637{
3638 size_t cur;
3639 size_t offset;
3640 struct page *page;
3641 char *kaddr;
3642 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3643 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3644
3645 WARN_ON(start > eb->len);
3646 WARN_ON(start + len > eb->start + eb->len);
3647
3648 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3649
Chris Masond3977122009-01-05 21:25:51 -05003650 while (len > 0) {
Chris Masond1310b22008-01-24 16:13:08 -05003651 page = extent_buffer_page(eb, i);
3652 WARN_ON(!PageUptodate(page));
3653
3654 cur = min(len, PAGE_CACHE_SIZE - offset);
3655 kaddr = kmap_atomic(page, KM_USER0);
3656 memset(kaddr + offset, c, cur);
3657 kunmap_atomic(kaddr, KM_USER0);
3658
3659 len -= cur;
3660 offset = 0;
3661 i++;
3662 }
3663}
Chris Masond1310b22008-01-24 16:13:08 -05003664
3665void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
3666 unsigned long dst_offset, unsigned long src_offset,
3667 unsigned long len)
3668{
3669 u64 dst_len = dst->len;
3670 size_t cur;
3671 size_t offset;
3672 struct page *page;
3673 char *kaddr;
3674 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3675 unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3676
3677 WARN_ON(src->len != dst_len);
3678
3679 offset = (start_offset + dst_offset) &
3680 ((unsigned long)PAGE_CACHE_SIZE - 1);
3681
Chris Masond3977122009-01-05 21:25:51 -05003682 while (len > 0) {
Chris Masond1310b22008-01-24 16:13:08 -05003683 page = extent_buffer_page(dst, i);
3684 WARN_ON(!PageUptodate(page));
3685
3686 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
3687
3688 kaddr = kmap_atomic(page, KM_USER0);
3689 read_extent_buffer(src, kaddr + offset, src_offset, cur);
3690 kunmap_atomic(kaddr, KM_USER0);
3691
3692 src_offset += cur;
3693 len -= cur;
3694 offset = 0;
3695 i++;
3696 }
3697}
Chris Masond1310b22008-01-24 16:13:08 -05003698
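/*
 * page-level helpers for the extent buffer memcpy/memmove below.
 * move_pages is safe for overlap: same-page moves go through memmove
 * and cross-page moves copy backwards byte by byte.  copy_pages is a
 * straight memcpy between (possibly identical) pages.
 */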
3699static void move_pages(struct page *dst_page, struct page *src_page,
3700 unsigned long dst_off, unsigned long src_off,
3701 unsigned long len)
3702{
3703 char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3704 if (dst_page == src_page) {
3705 memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
3706 } else {
3707 char *src_kaddr = kmap_atomic(src_page, KM_USER1);
3708 char *p = dst_kaddr + dst_off + len;
3709 char *s = src_kaddr + src_off + len;
3710
3711 while (len--)
3712 *--p = *--s;
3713
3714 kunmap_atomic(src_kaddr, KM_USER1);
3715 }
3716 kunmap_atomic(dst_kaddr, KM_USER0);
3717}
3718
3719static void copy_pages(struct page *dst_page, struct page *src_page,
3720 unsigned long dst_off, unsigned long src_off,
3721 unsigned long len)
3722{
3723 char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3724 char *src_kaddr;
3725
3726 if (dst_page != src_page)
3727 src_kaddr = kmap_atomic(src_page, KM_USER1);
3728 else
3729 src_kaddr = dst_kaddr;
3730
3731 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
3732 kunmap_atomic(dst_kaddr, KM_USER0);
3733 if (dst_page != src_page)
3734 kunmap_atomic(src_kaddr, KM_USER1);
3735}
3736
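/*
 * forward copy of a range within one extent buffer.  Callers whose
 * ranges might overlap go through memmove_extent_buffer instead.
 */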
3737void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3738 unsigned long src_offset, unsigned long len)
3739{
3740 size_t cur;
3741 size_t dst_off_in_page;
3742 size_t src_off_in_page;
3743 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3744 unsigned long dst_i;
3745 unsigned long src_i;
3746
3747 if (src_offset + len > dst->len) {
Chris Masond3977122009-01-05 21:25:51 -05003748 printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
3749 "len %lu dst len %lu\n", src_offset, len, dst->len);
Chris Masond1310b22008-01-24 16:13:08 -05003750 BUG_ON(1);
3751 }
3752 if (dst_offset + len > dst->len) {
Chris Masond3977122009-01-05 21:25:51 -05003753 printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
3754 "len %lu dst len %lu\n", dst_offset, len, dst->len);
Chris Masond1310b22008-01-24 16:13:08 -05003755 BUG_ON(1);
3756 }
3757
Chris Masond3977122009-01-05 21:25:51 -05003758 while (len > 0) {
Chris Masond1310b22008-01-24 16:13:08 -05003759 dst_off_in_page = (start_offset + dst_offset) &
3760 ((unsigned long)PAGE_CACHE_SIZE - 1);
3761 src_off_in_page = (start_offset + src_offset) &
3762 ((unsigned long)PAGE_CACHE_SIZE - 1);
3763
3764 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3765 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
3766
3767 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
3768 src_off_in_page));
3769 cur = min_t(unsigned long, cur,
3770 (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
3771
3772 copy_pages(extent_buffer_page(dst, dst_i),
3773 extent_buffer_page(dst, src_i),
3774 dst_off_in_page, src_off_in_page, cur);
3775
3776 src_offset += cur;
3777 dst_offset += cur;
3778 len -= cur;
3779 }
3780}
Chris Masond1310b22008-01-24 16:13:08 -05003781
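/*
 * overlap-safe move within one extent buffer.  When dst sits below
 * src a forward memcpy_extent_buffer is enough; otherwise the range
 * is walked from the tail with move_pages.
 */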
3782void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3783 unsigned long src_offset, unsigned long len)
3784{
3785 size_t cur;
3786 size_t dst_off_in_page;
3787 size_t src_off_in_page;
3788 unsigned long dst_end = dst_offset + len - 1;
3789 unsigned long src_end = src_offset + len - 1;
3790 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3791 unsigned long dst_i;
3792 unsigned long src_i;
3793
3794 if (src_offset + len > dst->len) {
Chris Masond3977122009-01-05 21:25:51 -05003795 printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
3796 "len %lu len %lu\n", src_offset, len, dst->len);
Chris Masond1310b22008-01-24 16:13:08 -05003797 BUG_ON(1);
3798 }
3799 if (dst_offset + len > dst->len) {
Chris Masond3977122009-01-05 21:25:51 -05003800 printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
3801 "len %lu len %lu\n", dst_offset, len, dst->len);
Chris Masond1310b22008-01-24 16:13:08 -05003802 BUG_ON(1);
3803 }
3804 if (dst_offset < src_offset) {
3805 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
3806 return;
3807 }
Chris Masond3977122009-01-05 21:25:51 -05003808 while (len > 0) {
Chris Masond1310b22008-01-24 16:13:08 -05003809 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
3810 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
3811
3812 dst_off_in_page = (start_offset + dst_end) &
3813 ((unsigned long)PAGE_CACHE_SIZE - 1);
3814 src_off_in_page = (start_offset + src_end) &
3815 ((unsigned long)PAGE_CACHE_SIZE - 1);
3816
3817 cur = min_t(unsigned long, len, src_off_in_page + 1);
3818 cur = min(cur, dst_off_in_page + 1);
3819 move_pages(extent_buffer_page(dst, dst_i),
3820 extent_buffer_page(dst, src_i),
3821 dst_off_in_page - cur + 1,
3822 src_off_in_page - cur + 1, cur);
3823
3824 dst_end -= cur;
3825 src_end -= cur;
3826 len -= cur;
3827 }
3828}
Chris Mason6af118c2008-07-22 11:18:07 -04003829
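/*
 * a releasepage helper for btree pages.  The buffer may only be torn
 * down when the tree holds the sole reference and the buffer is
 * clean; in that case its page references are dropped and it is
 * erased from the tree.  Returns 1 when freed, 0 when still busy.
 */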
3830int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
3831{
3832 u64 start = page_offset(page);
3833 struct extent_buffer *eb;
3834 int ret = 1;
3835 unsigned long i;
3836 unsigned long num_pages;
3837
3838 spin_lock(&tree->buffer_lock);
3839 eb = buffer_search(tree, start);
3840 if (!eb)
3841 goto out;
3842
3843 if (atomic_read(&eb->refs) > 1) {
3844 ret = 0;
3845 goto out;
3846 }
Chris Masonb9473432009-03-13 11:00:37 -04003847 if (test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
3848 ret = 0;
3849 goto out;
3850 }
Chris Mason6af118c2008-07-22 11:18:07 -04003851 /* at this point we can safely release the extent buffer */
3852 num_pages = num_extent_pages(eb->start, eb->len);
Christoph Hellwigb2141072008-09-05 16:43:31 -04003853 for (i = 0; i < num_pages; i++)
3854 page_cache_release(extent_buffer_page(eb, i));
Chris Mason6af118c2008-07-22 11:18:07 -04003855 rb_erase(&eb->rb_node, &tree->buffer);
3856 __free_extent_buffer(eb);
3857out:
3858 spin_unlock(&tree->buffer_lock);
3859 return ret;
3860}