#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/prefetch.h>
#include <linux/cleancache.h>
#include "extent_io.h"
#include "extent_map.h"
#include "compat.h"
#include "ctree.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "check-integrity.h"

static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;

static LIST_HEAD(buffers);
static LIST_HEAD(states);

#define LEAK_DEBUG 0
#if LEAK_DEBUG
static DEFINE_SPINLOCK(leak_lock);
#endif

#define BUFFER_LRU_MAX 64

struct tree_entry {
        u64 start;
        u64 end;
        struct rb_node rb_node;
};

struct extent_page_data {
        struct bio *bio;
        struct extent_io_tree *tree;
        get_extent_t *get_extent;

        /* tells writepage not to lock the state bits for this range
         * it still does the unlocking
         */
        unsigned int extent_locked:1;

        /* tells the submit_bio code to use a WRITE_SYNC */
        unsigned int sync_io:1;
};

56int __init extent_io_init(void)
57{
Christoph Hellwig9601e3f2009-04-13 15:33:09 +020058 extent_state_cache = kmem_cache_create("extent_state",
59 sizeof(struct extent_state), 0,
60 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
Chris Masond1310b22008-01-24 16:13:08 -050061 if (!extent_state_cache)
62 return -ENOMEM;
63
Christoph Hellwig9601e3f2009-04-13 15:33:09 +020064 extent_buffer_cache = kmem_cache_create("extent_buffers",
65 sizeof(struct extent_buffer), 0,
66 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
Chris Masond1310b22008-01-24 16:13:08 -050067 if (!extent_buffer_cache)
68 goto free_state_cache;
69 return 0;
70
71free_state_cache:
72 kmem_cache_destroy(extent_state_cache);
73 return -ENOMEM;
74}
75
76void extent_io_exit(void)
77{
78 struct extent_state *state;
Chris Mason2d2ae542008-03-26 16:24:23 -040079 struct extent_buffer *eb;
Chris Masond1310b22008-01-24 16:13:08 -050080
81 while (!list_empty(&states)) {
Chris Mason2d2ae542008-03-26 16:24:23 -040082 state = list_entry(states.next, struct extent_state, leak_list);
Chris Masond3977122009-01-05 21:25:51 -050083 printk(KERN_ERR "btrfs state leak: start %llu end %llu "
84 "state %lu in tree %p refs %d\n",
85 (unsigned long long)state->start,
86 (unsigned long long)state->end,
87 state->state, state->tree, atomic_read(&state->refs));
Chris Mason2d2ae542008-03-26 16:24:23 -040088 list_del(&state->leak_list);
Chris Masond1310b22008-01-24 16:13:08 -050089 kmem_cache_free(extent_state_cache, state);
90
91 }
92
Chris Mason2d2ae542008-03-26 16:24:23 -040093 while (!list_empty(&buffers)) {
94 eb = list_entry(buffers.next, struct extent_buffer, leak_list);
Chris Masond3977122009-01-05 21:25:51 -050095 printk(KERN_ERR "btrfs buffer leak start %llu len %lu "
96 "refs %d\n", (unsigned long long)eb->start,
97 eb->len, atomic_read(&eb->refs));
Chris Mason2d2ae542008-03-26 16:24:23 -040098 list_del(&eb->leak_list);
99 kmem_cache_free(extent_buffer_cache, eb);
100 }
Chris Masond1310b22008-01-24 16:13:08 -0500101 if (extent_state_cache)
102 kmem_cache_destroy(extent_state_cache);
103 if (extent_buffer_cache)
104 kmem_cache_destroy(extent_buffer_cache);
105}
106
107void extent_io_tree_init(struct extent_io_tree *tree,
David Sterbaf993c882011-04-20 23:35:57 +0200108 struct address_space *mapping)
Chris Masond1310b22008-01-24 16:13:08 -0500109{
Eric Paris6bef4d32010-02-23 19:43:04 +0000110 tree->state = RB_ROOT;
Miao Xie19fe0a82010-10-26 20:57:29 -0400111 INIT_RADIX_TREE(&tree->buffer, GFP_ATOMIC);
Chris Masond1310b22008-01-24 16:13:08 -0500112 tree->ops = NULL;
113 tree->dirty_bytes = 0;
Chris Mason70dec802008-01-29 09:59:12 -0500114 spin_lock_init(&tree->lock);
Chris Mason6af118c2008-07-22 11:18:07 -0400115 spin_lock_init(&tree->buffer_lock);
Chris Masond1310b22008-01-24 16:13:08 -0500116 tree->mapping = mapping;
Chris Masond1310b22008-01-24 16:13:08 -0500117}
Chris Masond1310b22008-01-24 16:13:08 -0500118
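/*
 * Usage sketch: a caller embeds an extent_io_tree in its in-memory
 * inode and points it at the inode's address space, along the lines
 * of:
 *
 *      extent_io_tree_init(&BTRFS_I(inode)->io_tree, inode->i_mapping);
 *
 * after which byte ranges in that mapping can be locked and tagged via
 * the set/clear_extent_bit helpers below.
 */
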
static struct extent_state *alloc_extent_state(gfp_t mask)
{
        struct extent_state *state;
#if LEAK_DEBUG
        unsigned long flags;
#endif

        state = kmem_cache_alloc(extent_state_cache, mask);
        if (!state)
                return state;
        state->state = 0;
        state->private = 0;
        state->tree = NULL;
#if LEAK_DEBUG
        spin_lock_irqsave(&leak_lock, flags);
        list_add(&state->leak_list, &states);
        spin_unlock_irqrestore(&leak_lock, flags);
#endif
        atomic_set(&state->refs, 1);
        init_waitqueue_head(&state->wq);
        return state;
}

void free_extent_state(struct extent_state *state)
{
        if (!state)
                return;
        if (atomic_dec_and_test(&state->refs)) {
#if LEAK_DEBUG
                unsigned long flags;
#endif
                WARN_ON(state->tree);
#if LEAK_DEBUG
                spin_lock_irqsave(&leak_lock, flags);
                list_del(&state->leak_list);
                spin_unlock_irqrestore(&leak_lock, flags);
#endif
                kmem_cache_free(extent_state_cache, state);
        }
}

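/*
 * Sketch of the refcounting pattern: extent_state is refcounted, so
 * code that keeps a pointer past the tree lock takes its own reference
 * and drops it with free_extent_state(), which frees only on the last
 * put:
 *
 *      atomic_inc(&state->refs);
 *      ...
 *      free_extent_state(state);
 *
 * cache_state() and uncache_state() below follow exactly this pattern.
 */
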
static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
                                   struct rb_node *node)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct tree_entry *entry;

        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct tree_entry, rb_node);

                if (offset < entry->start)
                        p = &(*p)->rb_left;
                else if (offset > entry->end)
                        p = &(*p)->rb_right;
                else
                        return parent;
        }

        entry = rb_entry(node, struct tree_entry, rb_node);
        rb_link_node(node, parent, p);
        rb_insert_color(node, root);
        return NULL;
}

static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
                                      struct rb_node **prev_ret,
                                      struct rb_node **next_ret)
{
        struct rb_root *root = &tree->state;
        struct rb_node *n = root->rb_node;
        struct rb_node *prev = NULL;
        struct rb_node *orig_prev = NULL;
        struct tree_entry *entry;
        struct tree_entry *prev_entry = NULL;

        while (n) {
                entry = rb_entry(n, struct tree_entry, rb_node);
                prev = n;
                prev_entry = entry;

                if (offset < entry->start)
                        n = n->rb_left;
                else if (offset > entry->end)
                        n = n->rb_right;
                else
                        return n;
        }

        if (prev_ret) {
                orig_prev = prev;
                while (prev && offset > prev_entry->end) {
                        prev = rb_next(prev);
                        prev_entry = rb_entry(prev, struct tree_entry, rb_node);
                }
                *prev_ret = prev;
                prev = orig_prev;
        }

        if (next_ret) {
                prev_entry = rb_entry(prev, struct tree_entry, rb_node);
                while (prev && offset < prev_entry->start) {
                        prev = rb_prev(prev);
                        prev_entry = rb_entry(prev, struct tree_entry, rb_node);
                }
                *next_ret = prev;
        }
        return NULL;
}

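/*
 * Note on the return contract above: __etree_search() returns the node
 * whose [start, end] contains 'offset' on an exact hit.  On a miss it
 * returns NULL and reports the gap's neighbours instead: *prev_ret is
 * the first node ending at or after 'offset', *next_ret the nearest
 * node starting at or before it.  tree_search() below leans on
 * prev_ret to fall back to the next state when nothing overlaps.
 */
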
static inline struct rb_node *tree_search(struct extent_io_tree *tree,
                                          u64 offset)
{
        struct rb_node *prev = NULL;
        struct rb_node *ret;

        ret = __etree_search(tree, offset, &prev, NULL);
        if (!ret)
                return prev;
        return ret;
}

static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
                     struct extent_state *other)
{
        if (tree->ops && tree->ops->merge_extent_hook)
                tree->ops->merge_extent_hook(tree->mapping->host, new,
                                             other);
}

/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree.  Extents with EXTENT_IOBITS set in their state
 * field are not merged because the end_io handlers need to be able to
 * do operations on them without sleeping (or doing allocations/splits).
 *
 * This should be called with the tree lock held.
 */
static void merge_state(struct extent_io_tree *tree,
                        struct extent_state *state)
{
        struct extent_state *other;
        struct rb_node *other_node;

        if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
                return;

        other_node = rb_prev(&state->rb_node);
        if (other_node) {
                other = rb_entry(other_node, struct extent_state, rb_node);
                if (other->end == state->start - 1 &&
                    other->state == state->state) {
                        merge_cb(tree, state, other);
                        state->start = other->start;
                        other->tree = NULL;
                        rb_erase(&other->rb_node, &tree->state);
                        free_extent_state(other);
                }
        }
        other_node = rb_next(&state->rb_node);
        if (other_node) {
                other = rb_entry(other_node, struct extent_state, rb_node);
                if (other->start == state->end + 1 &&
                    other->state == state->state) {
                        merge_cb(tree, state, other);
                        state->end = other->end;
                        other->tree = NULL;
                        rb_erase(&other->rb_node, &tree->state);
                        free_extent_state(other);
                }
        }
}

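/*
 * Worked example: with [0, 4095] and [4096, 8191] in the tree carrying
 * identical state bits, calling merge_state() on either one leaves a
 * single [0, 8191] record, because each neighbour check above only
 * fires when the ranges touch (other->end == state->start - 1) and the
 * state fields match exactly.
 */
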
static void set_state_cb(struct extent_io_tree *tree,
                         struct extent_state *state, int *bits)
{
        if (tree->ops && tree->ops->set_bit_hook)
                tree->ops->set_bit_hook(tree->mapping->host, state, bits);
}

static void clear_state_cb(struct extent_io_tree *tree,
                           struct extent_state *state, int *bits)
{
        if (tree->ops && tree->ops->clear_bit_hook)
                tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
}

static void set_state_bits(struct extent_io_tree *tree,
                           struct extent_state *state, int *bits);

/*
 * insert an extent_state struct into the tree.  'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case the
 * state struct is freed.
 *
 * The tree lock is not taken internally.  This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_io_tree *tree,
                        struct extent_state *state, u64 start, u64 end,
                        int *bits)
{
        struct rb_node *node;

        if (end < start) {
                printk(KERN_ERR "btrfs end < start %llu %llu\n",
                       (unsigned long long)end,
                       (unsigned long long)start);
                WARN_ON(1);
        }
        state->start = start;
        state->end = end;

        set_state_bits(tree, state, bits);

        node = tree_insert(&tree->state, end, &state->rb_node);
        if (node) {
                struct extent_state *found;
                found = rb_entry(node, struct extent_state, rb_node);
                printk(KERN_ERR "btrfs found node %llu %llu on insert of "
                       "%llu %llu\n", (unsigned long long)found->start,
                       (unsigned long long)found->end,
                       (unsigned long long)start, (unsigned long long)end);
                return -EEXIST;
        }
        state->tree = tree;
        merge_state(tree, state);
        return 0;
}

static void split_cb(struct extent_io_tree *tree, struct extent_state *orig,
                     u64 split)
{
        if (tree->ops && tree->ops->split_extent_hook)
                tree->ops->split_extent_hook(tree->mapping->host, orig, split);
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created second half.  'split' indicates an
 * offset inside 'orig' where it should be split.
 *
 * Before calling, the tree has 'orig' at [orig->start, orig->end].
 * After calling, there are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig: [split, orig->end]
 *
 * The tree locks are not taken by this function.  They need to be held
 * by the caller.
 */
static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
                       struct extent_state *prealloc, u64 split)
{
        struct rb_node *node;

        split_cb(tree, orig, split);

        prealloc->start = orig->start;
        prealloc->end = split - 1;
        prealloc->state = orig->state;
        orig->start = split;

        node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
        if (node) {
                free_extent_state(prealloc);
                return -EEXIST;
        }
        prealloc->tree = tree;
        return 0;
}

/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up any one waiting on this state (wake == 1), or
 * forcibly remove the state from the tree (delete == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree
 */
static int clear_state_bit(struct extent_io_tree *tree,
                           struct extent_state *state,
                           int *bits, int wake)
{
        int bits_to_clear = *bits & ~EXTENT_CTLBITS;
        int ret = state->state & bits_to_clear;

        if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
                u64 range = state->end - state->start + 1;
                WARN_ON(range > tree->dirty_bytes);
                tree->dirty_bytes -= range;
        }
        clear_state_cb(tree, state, bits);
        state->state &= ~bits_to_clear;
        if (wake)
                wake_up(&state->wq);
        if (state->state == 0) {
                if (state->tree) {
                        rb_erase(&state->rb_node, &tree->state);
                        state->tree = NULL;
                        free_extent_state(state);
                } else {
                        WARN_ON(1);
                }
        } else {
                merge_state(tree, state);
        }
        return ret;
}

static struct extent_state *
alloc_extent_state_atomic(struct extent_state *prealloc)
{
        if (!prealloc)
                prealloc = alloc_extent_state(GFP_ATOMIC);

        return prealloc;
}

/*
 * clear some bits on a range in the tree.  This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (ie for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
 * bits were already set, or zero if none of the bits were already set.
 */
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
                     int bits, int wake, int delete,
                     struct extent_state **cached_state,
                     gfp_t mask)
{
        struct extent_state *state;
        struct extent_state *cached;
        struct extent_state *prealloc = NULL;
        struct rb_node *next_node;
        struct rb_node *node;
        u64 last_end;
        int err;
        int set = 0;
        int clear = 0;

        if (delete)
                bits |= ~EXTENT_CTLBITS;
        bits |= EXTENT_FIRST_DELALLOC;

        if (bits & (EXTENT_IOBITS | EXTENT_BOUNDARY))
                clear = 1;
again:
        if (!prealloc && (mask & __GFP_WAIT)) {
                prealloc = alloc_extent_state(mask);
                if (!prealloc)
                        return -ENOMEM;
        }

        spin_lock(&tree->lock);
        if (cached_state) {
                cached = *cached_state;

                if (clear) {
                        *cached_state = NULL;
                        cached_state = NULL;
                }

                if (cached && cached->tree && cached->start <= start &&
                    cached->end > start) {
                        if (clear)
                                atomic_dec(&cached->refs);
                        state = cached;
                        goto hit_next;
                }
                if (clear)
                        free_extent_state(cached);
        }
        /*
         * this search will find the extents that end after
         * our range starts
         */
        node = tree_search(tree, start);
        if (!node)
                goto out;
        state = rb_entry(node, struct extent_state, rb_node);
hit_next:
        if (state->start > end)
                goto out;
        WARN_ON(state->end < start);
        last_end = state->end;

        if (state->end < end && !need_resched())
                next_node = rb_next(&state->rb_node);
        else
                next_node = NULL;

        /* the state doesn't have the wanted bits, go ahead */
        if (!(state->state & bits))
                goto next;

        /*
         * | ---- desired range ---- |
         *  | state | or
         *  | ------------- state -------------- |
         *
         * We need to split the extent we found, and may flip
         * bits on second half.
         *
         * If the extent we found extends past our range, we
         * just split and search again.  It'll get split again
         * the next time though.
         *
         * If the extent we found is inside our range, we clear
         * the desired bit on it.
         */

        if (state->start < start) {
                prealloc = alloc_extent_state_atomic(prealloc);
                BUG_ON(!prealloc);
                err = split_state(tree, state, prealloc, start);
                BUG_ON(err == -EEXIST);
                prealloc = NULL;
                if (err)
                        goto out;
                if (state->end <= end) {
                        set |= clear_state_bit(tree, state, &bits, wake);
                        if (last_end == (u64)-1)
                                goto out;
                        start = last_end + 1;
                }
                goto search_again;
        }
        /*
         * | ---- desired range ---- |
         *                        | state |
         * We need to split the extent, and clear the bit
         * on the first half
         */
        if (state->start <= end && state->end > end) {
                prealloc = alloc_extent_state_atomic(prealloc);
                BUG_ON(!prealloc);
                err = split_state(tree, state, prealloc, end + 1);
                BUG_ON(err == -EEXIST);
                if (wake)
                        wake_up(&state->wq);

                set |= clear_state_bit(tree, prealloc, &bits, wake);

                prealloc = NULL;
                goto out;
        }

        set |= clear_state_bit(tree, state, &bits, wake);
next:
        if (last_end == (u64)-1)
                goto out;
        start = last_end + 1;
        if (start <= end && next_node) {
                state = rb_entry(next_node, struct extent_state,
                                 rb_node);
                if (state->start == start)
                        goto hit_next;
        }
        goto search_again;

out:
        spin_unlock(&tree->lock);
        if (prealloc)
                free_extent_state(prealloc);

        return set;

search_again:
        if (start > end)
                goto out;
        spin_unlock(&tree->lock);
        if (mask & __GFP_WAIT)
                cond_resched();
        goto again;
}

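/*
 * Usage sketch: a truncate-style caller that wants the range gone no
 * matter which bits are set passes delete == 1, for instance:
 *
 *      clear_extent_bit(tree, start, end, 0, 1, 1, NULL, GFP_NOFS);
 *
 * because delete widens 'bits' above to everything outside
 * EXTENT_CTLBITS, so every overlapping state struct is cleared, woken
 * and removed.
 */
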
static int wait_on_state(struct extent_io_tree *tree,
                         struct extent_state *state)
                __releases(tree->lock)
                __acquires(tree->lock)
{
        DEFINE_WAIT(wait);
        prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
        spin_unlock(&tree->lock);
        schedule();
        spin_lock(&tree->lock);
        finish_wait(&state->wq, &wait);
        return 0;
}

/*
 * waits for one or more bits to clear on a range in the state tree.
 * The range [start, end] is inclusive.
 * The tree lock is taken by this function
 */
int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
{
        struct extent_state *state;
        struct rb_node *node;

        spin_lock(&tree->lock);
again:
        while (1) {
                /*
                 * this search will find all the extents that end after
                 * our range starts
                 */
                node = tree_search(tree, start);
                if (!node)
                        break;

                state = rb_entry(node, struct extent_state, rb_node);

                if (state->start > end)
                        goto out;

                if (state->state & bits) {
                        start = state->start;
                        atomic_inc(&state->refs);
                        wait_on_state(tree, state);
                        free_extent_state(state);
                        goto again;
                }
                start = state->end + 1;

                if (start > end)
                        break;

                cond_resched_lock(&tree->lock);
        }
out:
        spin_unlock(&tree->lock);
        return 0;
}

static void set_state_bits(struct extent_io_tree *tree,
                           struct extent_state *state,
                           int *bits)
{
        int bits_to_set = *bits & ~EXTENT_CTLBITS;

        set_state_cb(tree, state, bits);
        if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
                u64 range = state->end - state->start + 1;
                tree->dirty_bytes += range;
        }
        state->state |= bits_to_set;
}

static void cache_state(struct extent_state *state,
                        struct extent_state **cached_ptr)
{
        if (cached_ptr && !(*cached_ptr)) {
                if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY)) {
                        *cached_ptr = state;
                        atomic_inc(&state->refs);
                }
        }
}

static void uncache_state(struct extent_state **cached_ptr)
{
        if (cached_ptr && (*cached_ptr)) {
                struct extent_state *state = *cached_ptr;
                *cached_ptr = NULL;
                free_extent_state(state);
        }
}

/*
 * set some bits on a range in the tree.  This may require allocations or
 * sleeping, so the gfp mask is used to indicate what is allowed.
 *
 * If any of the exclusive bits are set, this will fail with -EEXIST if some
 * part of the range already has the desired bits set.  The start of the
 * existing range is returned in failed_start in this case.
 *
 * [start, end] is inclusive.  This takes the tree lock.
 */

int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
                   int bits, int exclusive_bits, u64 *failed_start,
                   struct extent_state **cached_state, gfp_t mask)
{
        struct extent_state *state;
        struct extent_state *prealloc = NULL;
        struct rb_node *node;
        int err = 0;
        u64 last_start;
        u64 last_end;

        bits |= EXTENT_FIRST_DELALLOC;
again:
        if (!prealloc && (mask & __GFP_WAIT)) {
                prealloc = alloc_extent_state(mask);
                BUG_ON(!prealloc);
        }

        spin_lock(&tree->lock);
        if (cached_state && *cached_state) {
                state = *cached_state;
                if (state->start <= start && state->end > start &&
                    state->tree) {
                        node = &state->rb_node;
                        goto hit_next;
                }
        }
        /*
         * this search will find all the extents that end after
         * our range starts.
         */
        node = tree_search(tree, start);
        if (!node) {
                prealloc = alloc_extent_state_atomic(prealloc);
                BUG_ON(!prealloc);
                err = insert_state(tree, prealloc, start, end, &bits);
                prealloc = NULL;
                BUG_ON(err == -EEXIST);
                goto out;
        }
        state = rb_entry(node, struct extent_state, rb_node);
hit_next:
        last_start = state->start;
        last_end = state->end;

        /*
         * | ---- desired range ---- |
         * | state |
         *
         * Just lock what we found and keep going
         */
        if (state->start == start && state->end <= end) {
                struct rb_node *next_node;
                if (state->state & exclusive_bits) {
                        *failed_start = state->start;
                        err = -EEXIST;
                        goto out;
                }

                set_state_bits(tree, state, &bits);

                cache_state(state, cached_state);
                merge_state(tree, state);
                if (last_end == (u64)-1)
                        goto out;

                start = last_end + 1;
                next_node = rb_next(&state->rb_node);
                if (next_node && start < end && prealloc && !need_resched()) {
                        state = rb_entry(next_node, struct extent_state,
                                         rb_node);
                        if (state->start == start)
                                goto hit_next;
                }
                goto search_again;
        }

        /*
         * | ---- desired range ---- |
         *  | state |
         *   or
         * | ------------- state -------------- |
         *
         * We need to split the extent we found, and may flip bits on
         * second half.
         *
         * If the extent we found extends past our
         * range, we just split and search again.  It'll get split
         * again the next time though.
         *
         * If the extent we found is inside our range, we set the
         * desired bit on it.
         */
        if (state->start < start) {
                if (state->state & exclusive_bits) {
                        *failed_start = start;
                        err = -EEXIST;
                        goto out;
                }

                prealloc = alloc_extent_state_atomic(prealloc);
                BUG_ON(!prealloc);
                err = split_state(tree, state, prealloc, start);
                BUG_ON(err == -EEXIST);
                prealloc = NULL;
                if (err)
                        goto out;
                if (state->end <= end) {
                        set_state_bits(tree, state, &bits);
                        cache_state(state, cached_state);
                        merge_state(tree, state);
                        if (last_end == (u64)-1)
                                goto out;
                        start = last_end + 1;
                }
                goto search_again;
        }
        /*
         * | ---- desired range ---- |
         *     | state | or               | state |
         *
         * There's a hole, we need to insert something in it and
         * ignore the extent we found.
         */
        if (state->start > start) {
                u64 this_end;
                if (end < last_start)
                        this_end = end;
                else
                        this_end = last_start - 1;

                prealloc = alloc_extent_state_atomic(prealloc);
                BUG_ON(!prealloc);

                /*
                 * Avoid freeing 'prealloc' if it can be merged with
                 * the later extent.
                 */
                err = insert_state(tree, prealloc, start, this_end,
                                   &bits);
                BUG_ON(err == -EEXIST);
                if (err) {
                        free_extent_state(prealloc);
                        prealloc = NULL;
                        goto out;
                }
                cache_state(prealloc, cached_state);
                prealloc = NULL;
                start = this_end + 1;
                goto search_again;
        }
        /*
         * | ---- desired range ---- |
         *                        | state |
         * We need to split the extent, and set the bit
         * on the first half
         */
        if (state->start <= end && state->end > end) {
                if (state->state & exclusive_bits) {
                        *failed_start = start;
                        err = -EEXIST;
                        goto out;
                }

                prealloc = alloc_extent_state_atomic(prealloc);
                BUG_ON(!prealloc);
                err = split_state(tree, state, prealloc, end + 1);
                BUG_ON(err == -EEXIST);

                set_state_bits(tree, prealloc, &bits);
                cache_state(prealloc, cached_state);
                merge_state(tree, prealloc);
                prealloc = NULL;
                goto out;
        }

        goto search_again;

out:
        spin_unlock(&tree->lock);
        if (prealloc)
                free_extent_state(prealloc);

        return err;

search_again:
        if (start > end)
                goto out;
        spin_unlock(&tree->lock);
        if (mask & __GFP_WAIT)
                cond_resched();
        goto again;
}

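/*
 * Usage sketch: with exclusive_bits == 0 this simply tags a range, as
 * the wrappers further down do:
 *
 *      set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
 *                     NULL, GFP_NOFS);
 *
 * With exclusive_bits set (see lock_extent_bits() below), a
 * conflicting range makes this return -EEXIST and the conflicting
 * offset comes back through *failed_start.
 */
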
/**
 * convert_extent_bit - convert all bits in a given range from one bit to
 *                      another
 * @tree:       the io tree to search
 * @start:      the start offset in bytes
 * @end:        the end offset in bytes (inclusive)
 * @bits:       the bits to set in this range
 * @clear_bits: the bits to clear in this range
 * @mask:       the allocation mask
 *
 * This will go through and set bits for the given range.  If any states exist
 * already in this range they are set with the given bit and cleared of the
 * clear_bits.  This is only meant to be used by things that are mergeable, ie
 * converting from say DELALLOC to DIRTY.  This is not meant to be used with
 * boundary bits like LOCK.
 */
int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
                       int bits, int clear_bits, gfp_t mask)
{
        struct extent_state *state;
        struct extent_state *prealloc = NULL;
        struct rb_node *node;
        int err = 0;
        u64 last_start;
        u64 last_end;

again:
        if (!prealloc && (mask & __GFP_WAIT)) {
                prealloc = alloc_extent_state(mask);
                if (!prealloc)
                        return -ENOMEM;
        }

        spin_lock(&tree->lock);
        /*
         * this search will find all the extents that end after
         * our range starts.
         */
        node = tree_search(tree, start);
        if (!node) {
                prealloc = alloc_extent_state_atomic(prealloc);
                if (!prealloc) {
                        err = -ENOMEM;
                        goto out;
                }
                err = insert_state(tree, prealloc, start, end, &bits);
                prealloc = NULL;
                BUG_ON(err == -EEXIST);
                goto out;
        }
        state = rb_entry(node, struct extent_state, rb_node);
hit_next:
        last_start = state->start;
        last_end = state->end;

        /*
         * | ---- desired range ---- |
         * | state |
         *
         * Just lock what we found and keep going
         */
        if (state->start == start && state->end <= end) {
                struct rb_node *next_node;

                set_state_bits(tree, state, &bits);
                clear_state_bit(tree, state, &clear_bits, 0);

                merge_state(tree, state);
                if (last_end == (u64)-1)
                        goto out;

                start = last_end + 1;
                next_node = rb_next(&state->rb_node);
                if (next_node && start < end && prealloc && !need_resched()) {
                        state = rb_entry(next_node, struct extent_state,
                                         rb_node);
                        if (state->start == start)
                                goto hit_next;
                }
                goto search_again;
        }

        /*
         * | ---- desired range ---- |
         *  | state |
         *   or
         * | ------------- state -------------- |
         *
         * We need to split the extent we found, and may flip bits on
         * second half.
         *
         * If the extent we found extends past our
         * range, we just split and search again.  It'll get split
         * again the next time though.
         *
         * If the extent we found is inside our range, we set the
         * desired bit on it.
         */
        if (state->start < start) {
                prealloc = alloc_extent_state_atomic(prealloc);
                if (!prealloc) {
                        err = -ENOMEM;
                        goto out;
                }
                err = split_state(tree, state, prealloc, start);
                BUG_ON(err == -EEXIST);
                prealloc = NULL;
                if (err)
                        goto out;
                if (state->end <= end) {
                        set_state_bits(tree, state, &bits);
                        clear_state_bit(tree, state, &clear_bits, 0);
                        merge_state(tree, state);
                        if (last_end == (u64)-1)
                                goto out;
                        start = last_end + 1;
                }
                goto search_again;
        }
        /*
         * | ---- desired range ---- |
         *     | state | or               | state |
         *
         * There's a hole, we need to insert something in it and
         * ignore the extent we found.
         */
        if (state->start > start) {
                u64 this_end;
                if (end < last_start)
                        this_end = end;
                else
                        this_end = last_start - 1;

                prealloc = alloc_extent_state_atomic(prealloc);
                if (!prealloc) {
                        err = -ENOMEM;
                        goto out;
                }

                /*
                 * Avoid freeing 'prealloc' if it can be merged with
                 * the later extent.
                 */
                err = insert_state(tree, prealloc, start, this_end,
                                   &bits);
                BUG_ON(err == -EEXIST);
                if (err) {
                        free_extent_state(prealloc);
                        prealloc = NULL;
                        goto out;
                }
                prealloc = NULL;
                start = this_end + 1;
                goto search_again;
        }
        /*
         * | ---- desired range ---- |
         *                        | state |
         * We need to split the extent, and set the bit
         * on the first half
         */
        if (state->start <= end && state->end > end) {
                prealloc = alloc_extent_state_atomic(prealloc);
                if (!prealloc) {
                        err = -ENOMEM;
                        goto out;
                }

                err = split_state(tree, state, prealloc, end + 1);
                BUG_ON(err == -EEXIST);

                set_state_bits(tree, prealloc, &bits);
                clear_state_bit(tree, prealloc, &clear_bits, 0);

                merge_state(tree, prealloc);
                prealloc = NULL;
                goto out;
        }

        goto search_again;

out:
        spin_unlock(&tree->lock);
        if (prealloc)
                free_extent_state(prealloc);

        return err;

search_again:
        if (start > end)
                goto out;
        spin_unlock(&tree->lock);
        if (mask & __GFP_WAIT)
                cond_resched();
        goto again;
}

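/*
 * A sketch of the DELALLOC -> DIRTY case mentioned in the comment
 * above:
 *
 *      convert_extent_bit(tree, start, end, EXTENT_DIRTY,
 *                         EXTENT_DELALLOC, GFP_NOFS);
 *
 * sets EXTENT_DIRTY on every state struct overlapping [start, end] and
 * clears EXTENT_DELALLOC from it in the same pass.
 */
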
/* wrappers around set/clear extent bit */
int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
                     gfp_t mask)
{
        return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
                              NULL, mask);
}

int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
                    int bits, gfp_t mask)
{
        return set_extent_bit(tree, start, end, bits, 0, NULL,
                              NULL, mask);
}

int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
                      int bits, gfp_t mask)
{
        return clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask);
}

int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
                        struct extent_state **cached_state, gfp_t mask)
{
        return set_extent_bit(tree, start, end,
                              EXTENT_DELALLOC | EXTENT_UPTODATE,
                              0, NULL, cached_state, mask);
}

int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
                       gfp_t mask)
{
        return clear_extent_bit(tree, start, end,
                                EXTENT_DIRTY | EXTENT_DELALLOC |
                                EXTENT_DO_ACCOUNTING, 0, 0, NULL, mask);
}

int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
                   gfp_t mask)
{
        return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
                              NULL, mask);
}

int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
                        struct extent_state **cached_state, gfp_t mask)
{
        return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0,
                              NULL, cached_state, mask);
}

static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
                                 u64 end, struct extent_state **cached_state,
                                 gfp_t mask)
{
        return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
                                cached_state, mask);
}

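/*
 * One possible pairing of the wrappers above, sketched for a write
 * path that marks a reserved range and later cleans it:
 *
 *      struct extent_state *cached = NULL;
 *
 *      set_extent_delalloc(tree, start, end, &cached, GFP_NOFS);
 *      ...
 *      clear_extent_dirty(tree, start, end, GFP_NOFS);
 *
 * clear_extent_dirty() drops DIRTY, DELALLOC and DO_ACCOUNTING in one
 * call, matching the bit mask it passes to clear_extent_bit().
 */
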
/*
 * either insert or lock state struct between start and end; use mask to
 * tell us if waiting is desired.
 */
int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
                     int bits, struct extent_state **cached_state, gfp_t mask)
{
        int err;
        u64 failed_start;
        while (1) {
                err = set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
                                     EXTENT_LOCKED, &failed_start,
                                     cached_state, mask);
                if (err == -EEXIST && (mask & __GFP_WAIT)) {
                        wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
                        start = failed_start;
                } else {
                        break;
                }
                WARN_ON(start > end);
        }
        return err;
}

int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
{
        return lock_extent_bits(tree, start, end, 0, NULL, mask);
}

int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
                    gfp_t mask)
{
        int err;
        u64 failed_start;

        err = set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
                             &failed_start, NULL, mask);
        if (err == -EEXIST) {
                if (failed_start > start)
                        clear_extent_bit(tree, start, failed_start - 1,
                                         EXTENT_LOCKED, 1, 0, NULL, mask);
                return 0;
        }
        return 1;
}

int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
                         struct extent_state **cached, gfp_t mask)
{
        return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
                                mask);
}

int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
{
        return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
                                mask);
}

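/*
 * Usage sketch: lock_extent()/unlock_extent() bracket work on a byte
 * range, both bounds inclusive:
 *
 *      lock_extent(tree, start, start + len - 1, GFP_NOFS);
 *      ...
 *      unlock_extent(tree, start, start + len - 1, GFP_NOFS);
 *
 * try_lock_extent() is the non-blocking variant: it returns 1 on
 * success and 0 if any part of the range was already locked, undoing
 * the partial lock it may have taken.
 */
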
/*
 * helper function to set both pages and extents in the tree writeback
 */
static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
{
        unsigned long index = start >> PAGE_CACHE_SHIFT;
        unsigned long end_index = end >> PAGE_CACHE_SHIFT;
        struct page *page;

        while (index <= end_index) {
                page = find_get_page(tree->mapping, index);
                BUG_ON(!page);
                set_page_writeback(page);
                page_cache_release(page);
                index++;
        }
        return 0;
}

/* find the first state struct with 'bits' set after 'start', and
 * return it.  tree->lock must be held.  NULL will be returned if
 * nothing was found after 'start'
 */
struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
                                                 u64 start, int bits)
{
        struct rb_node *node;
        struct extent_state *state;

        /*
         * this search will find all the extents that end after
         * our range starts.
         */
        node = tree_search(tree, start);
        if (!node)
                goto out;

        while (1) {
                state = rb_entry(node, struct extent_state, rb_node);
                if (state->end >= start && (state->state & bits))
                        return state;

                node = rb_next(node);
                if (!node)
                        break;
        }
out:
        return NULL;
}

/*
 * find the first offset in the io tree with 'bits' set.  zero is
 * returned if we find something, and *start_ret and *end_ret are
 * set to reflect the state struct that was found.
 *
 * If nothing was found, 1 is returned, < 0 on error
 */
int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
                          u64 *start_ret, u64 *end_ret, int bits)
{
        struct extent_state *state;
        int ret = 1;

        spin_lock(&tree->lock);
        state = find_first_extent_bit_state(tree, start, bits);
        if (state) {
                *start_ret = state->start;
                *end_ret = state->end;
                ret = 0;
        }
        spin_unlock(&tree->lock);
        return ret;
}

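/*
 * Usage sketch: scanning forward for the next dirty range after 'cur':
 *
 *      u64 found_start, found_end;
 *
 *      if (!find_first_extent_bit(tree, cur, &found_start, &found_end,
 *                                 EXTENT_DIRTY)) {
 *              ... [found_start, found_end] has EXTENT_DIRTY set ...
 *      }
 *
 * A return of 1 means no state at or after 'cur' carries the bits.
 */
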
/*
 * find a contiguous range of bytes in the file marked as delalloc, not
 * more than 'max_bytes'.  start and end are used to return the range.
 *
 * 1 is returned if we find something, 0 if nothing was in the tree
 */
static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
                                        u64 *start, u64 *end, u64 max_bytes,
                                        struct extent_state **cached_state)
{
        struct rb_node *node;
        struct extent_state *state;
        u64 cur_start = *start;
        u64 found = 0;
        u64 total_bytes = 0;

        spin_lock(&tree->lock);

        /*
         * this search will find all the extents that end after
         * our range starts.
         */
        node = tree_search(tree, cur_start);
        if (!node) {
                if (!found)
                        *end = (u64)-1;
                goto out;
        }

        while (1) {
                state = rb_entry(node, struct extent_state, rb_node);
                if (found && (state->start != cur_start ||
                              (state->state & EXTENT_BOUNDARY))) {
                        goto out;
                }
                if (!(state->state & EXTENT_DELALLOC)) {
                        if (!found)
                                *end = state->end;
                        goto out;
                }
                if (!found) {
                        *start = state->start;
                        *cached_state = state;
                        atomic_inc(&state->refs);
                }
                found++;
                *end = state->end;
                cur_start = state->end + 1;
                node = rb_next(node);
                if (!node)
                        break;
                total_bytes += state->end - state->start + 1;
                if (total_bytes >= max_bytes)
                        break;
        }
out:
        spin_unlock(&tree->lock);
        return found;
}

static noinline int __unlock_for_delalloc(struct inode *inode,
                                          struct page *locked_page,
                                          u64 start, u64 end)
{
        int ret;
        struct page *pages[16];
        unsigned long index = start >> PAGE_CACHE_SHIFT;
        unsigned long end_index = end >> PAGE_CACHE_SHIFT;
        unsigned long nr_pages = end_index - index + 1;
        int i;

        if (index == locked_page->index && end_index == index)
                return 0;

        while (nr_pages > 0) {
                ret = find_get_pages_contig(inode->i_mapping, index,
                                     min_t(unsigned long, nr_pages,
                                     ARRAY_SIZE(pages)), pages);
                for (i = 0; i < ret; i++) {
                        if (pages[i] != locked_page)
                                unlock_page(pages[i]);
                        page_cache_release(pages[i]);
                }
                nr_pages -= ret;
                index += ret;
                cond_resched();
        }
        return 0;
}

1382static noinline int lock_delalloc_pages(struct inode *inode,
1383 struct page *locked_page,
1384 u64 delalloc_start,
1385 u64 delalloc_end)
1386{
1387 unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT;
1388 unsigned long start_index = index;
1389 unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT;
1390 unsigned long pages_locked = 0;
1391 struct page *pages[16];
1392 unsigned long nrpages;
1393 int ret;
1394 int i;
1395
1396 /* the caller is responsible for locking the start index */
1397 if (index == locked_page->index && index == end_index)
1398 return 0;
1399
1400 /* skip the page at the start index */
1401 nrpages = end_index - index + 1;
Chris Masond3977122009-01-05 21:25:51 -05001402 while (nrpages > 0) {
Chris Masonc8b97812008-10-29 14:49:59 -04001403 ret = find_get_pages_contig(inode->i_mapping, index,
Chris Mason5b050f02008-11-11 09:34:41 -05001404 min_t(unsigned long,
1405 nrpages, ARRAY_SIZE(pages)), pages);
Chris Masonc8b97812008-10-29 14:49:59 -04001406 if (ret == 0) {
1407 ret = -EAGAIN;
1408 goto done;
1409 }
1410 /* now we have an array of pages, lock them all */
1411 for (i = 0; i < ret; i++) {
1412 /*
1413 * the caller is taking responsibility for
1414 * locked_page
1415 */
Chris Mason771ed682008-11-06 22:02:51 -05001416 if (pages[i] != locked_page) {
Chris Masonc8b97812008-10-29 14:49:59 -04001417 lock_page(pages[i]);
Chris Masonf2b1c412008-11-10 07:31:30 -05001418 if (!PageDirty(pages[i]) ||
1419 pages[i]->mapping != inode->i_mapping) {
Chris Mason771ed682008-11-06 22:02:51 -05001420 ret = -EAGAIN;
1421 unlock_page(pages[i]);
1422 page_cache_release(pages[i]);
1423 goto done;
1424 }
1425 }
Chris Masonc8b97812008-10-29 14:49:59 -04001426 page_cache_release(pages[i]);
Chris Mason771ed682008-11-06 22:02:51 -05001427 pages_locked++;
Chris Masonc8b97812008-10-29 14:49:59 -04001428 }
Chris Masonc8b97812008-10-29 14:49:59 -04001429 nrpages -= ret;
1430 index += ret;
1431 cond_resched();
1432 }
1433 ret = 0;
1434done:
1435 if (ret && pages_locked) {
1436 __unlock_for_delalloc(inode, locked_page,
1437 delalloc_start,
1438 ((u64)(start_index + pages_locked - 1)) <<
1439 PAGE_CACHE_SHIFT);
1440 }
1441 return ret;
1442}

/*
 * find a contiguous range of bytes in the file marked as delalloc, not
 * more than 'max_bytes'.  start and end are used to return the range.
 *
 * 1 is returned if we find something, 0 if nothing was in the tree.
 */
static noinline u64 find_lock_delalloc_range(struct inode *inode,
                                             struct extent_io_tree *tree,
                                             struct page *locked_page,
                                             u64 *start, u64 *end,
                                             u64 max_bytes)
{
        u64 delalloc_start;
        u64 delalloc_end;
        u64 found;
        struct extent_state *cached_state = NULL;
        int ret;
        int loops = 0;

again:
        /* step one, find a bunch of delalloc bytes starting at start */
        delalloc_start = *start;
        delalloc_end = 0;
        found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
                                    max_bytes, &cached_state);
        if (!found || delalloc_end <= *start) {
                *start = delalloc_start;
                *end = delalloc_end;
                free_extent_state(cached_state);
                return found;
        }

        /*
         * start comes from the offset of locked_page.  We have to lock
         * pages in order, so we can't process delalloc bytes before
         * locked_page
         */
        if (delalloc_start < *start)
                delalloc_start = *start;

        /*
         * make sure to limit the number of pages we try to lock down
         * if we're looping.
         */
        if (delalloc_end + 1 - delalloc_start > max_bytes && loops)
                delalloc_end = delalloc_start + PAGE_CACHE_SIZE - 1;

        /* step two, lock all the pages after the page that has start */
        ret = lock_delalloc_pages(inode, locked_page,
                                  delalloc_start, delalloc_end);
        if (ret == -EAGAIN) {
                /* some of the pages are gone, let's avoid looping by
                 * shortening the size of the delalloc range we're searching
                 */
                free_extent_state(cached_state);
                if (!loops) {
                        unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1);
                        max_bytes = PAGE_CACHE_SIZE - offset;
                        loops = 1;
                        goto again;
                } else {
                        found = 0;
                        goto out_failed;
                }
        }
        BUG_ON(ret);

        /* step three, lock the state bits for the whole range */
        lock_extent_bits(tree, delalloc_start, delalloc_end,
                         0, &cached_state, GFP_NOFS);

        /* then test to make sure it is all still delalloc */
        ret = test_range_bit(tree, delalloc_start, delalloc_end,
                             EXTENT_DELALLOC, 1, cached_state);
        if (!ret) {
                unlock_extent_cached(tree, delalloc_start, delalloc_end,
                                     &cached_state, GFP_NOFS);
                __unlock_for_delalloc(inode, locked_page,
                                      delalloc_start, delalloc_end);
                cond_resched();
                goto again;
        }
        free_extent_state(cached_state);
        *start = delalloc_start;
        *end = delalloc_end;
out_failed:
        return found;
}
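
#if 0   /* illustrative example, not part of the original file */
/*
 * Sketch of how a writepage-style caller is expected to drive
 * find_lock_delalloc_range(); it mirrors the loop in
 * __extent_writepage() further down, including the 128MB cap.
 * The function name and its parameters are placeholders, not new API.
 */
static void example_walk_delalloc(struct inode *inode,
                                  struct extent_io_tree *tree,
                                  struct page *locked_page,
                                  u64 start, u64 page_end)
{
        u64 delalloc_start = start;
        u64 delalloc_end = 0;
        u64 found;

        while (delalloc_end < page_end) {
                found = find_lock_delalloc_range(inode, tree, locked_page,
                                                 &delalloc_start,
                                                 &delalloc_end,
                                                 128 * 1024 * 1024);
                if (found == 0) {
                        /* nothing delalloc here, skip past the range */
                        delalloc_start = delalloc_end + 1;
                        continue;
                }
                /*
                 * [delalloc_start, delalloc_end] is now known delalloc,
                 * its pages are locked and the state bits are held;
                 * kick off allocation/IO here, then advance.
                 */
                delalloc_start = delalloc_end + 1;
        }
}
#endif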

int extent_clear_unlock_delalloc(struct inode *inode,
                                 struct extent_io_tree *tree,
                                 u64 start, u64 end, struct page *locked_page,
                                 unsigned long op)
{
        int ret;
        struct page *pages[16];
        unsigned long index = start >> PAGE_CACHE_SHIFT;
        unsigned long end_index = end >> PAGE_CACHE_SHIFT;
        unsigned long nr_pages = end_index - index + 1;
        int i;
        int clear_bits = 0;

        if (op & EXTENT_CLEAR_UNLOCK)
                clear_bits |= EXTENT_LOCKED;
        if (op & EXTENT_CLEAR_DIRTY)
                clear_bits |= EXTENT_DIRTY;

        if (op & EXTENT_CLEAR_DELALLOC)
                clear_bits |= EXTENT_DELALLOC;

        clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS);
        if (!(op & (EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
                    EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK |
                    EXTENT_SET_PRIVATE2)))
                return 0;

        while (nr_pages > 0) {
                ret = find_get_pages_contig(inode->i_mapping, index,
                                     min_t(unsigned long,
                                     nr_pages, ARRAY_SIZE(pages)), pages);
                for (i = 0; i < ret; i++) {

                        if (op & EXTENT_SET_PRIVATE2)
                                SetPagePrivate2(pages[i]);

                        if (pages[i] == locked_page) {
                                page_cache_release(pages[i]);
                                continue;
                        }
                        if (op & EXTENT_CLEAR_DIRTY)
                                clear_page_dirty_for_io(pages[i]);
                        if (op & EXTENT_SET_WRITEBACK)
                                set_page_writeback(pages[i]);
                        if (op & EXTENT_END_WRITEBACK)
                                end_page_writeback(pages[i]);
                        if (op & EXTENT_CLEAR_UNLOCK_PAGE)
                                unlock_page(pages[i]);
                        page_cache_release(pages[i]);
                }
                nr_pages -= ret;
                index += ret;
                cond_resched();
        }
        return 0;
}
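
#if 0   /* illustrative example, not part of the original file */
/*
 * Sketch of the op bitmask contract of extent_clear_unlock_delalloc():
 * a typical cleanup caller clears the delalloc accounting and releases
 * pages and the extent lock in one call.  All EXTENT_* flags below are
 * the ones tested in the function above; the function name and range
 * here are placeholders.
 */
static void example_cleanup_range(struct inode *inode,
                                  struct extent_io_tree *tree,
                                  struct page *locked_page,
                                  u64 start, u64 end)
{
        unsigned long op = EXTENT_CLEAR_UNLOCK_PAGE |
                           EXTENT_CLEAR_UNLOCK |
                           EXTENT_CLEAR_DELALLOC |
                           EXTENT_CLEAR_DIRTY |
                           EXTENT_SET_WRITEBACK |
                           EXTENT_END_WRITEBACK;

        /* locked_page itself is skipped by the page ops, per the loop above */
        extent_clear_unlock_delalloc(inode, tree, start, end,
                                     locked_page, op);
}
#endif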

/*
 * count the number of bytes in the tree that have a given bit(s)
 * set.  This can be fairly slow, except for EXTENT_DIRTY which is
 * cached.  The total number found is returned.
 */
u64 count_range_bits(struct extent_io_tree *tree,
                     u64 *start, u64 search_end, u64 max_bytes,
                     unsigned long bits, int contig)
{
        struct rb_node *node;
        struct extent_state *state;
        u64 cur_start = *start;
        u64 total_bytes = 0;
        u64 last = 0;
        int found = 0;

        if (search_end <= cur_start) {
                WARN_ON(1);
                return 0;
        }

        spin_lock(&tree->lock);
        if (cur_start == 0 && bits == EXTENT_DIRTY) {
                total_bytes = tree->dirty_bytes;
                goto out;
        }
        /*
         * this search will find all the extents that end after
         * our range starts.
         */
        node = tree_search(tree, cur_start);
        if (!node)
                goto out;

        while (1) {
                state = rb_entry(node, struct extent_state, rb_node);
                if (state->start > search_end)
                        break;
                if (contig && found && state->start > last + 1)
                        break;
                if (state->end >= cur_start && (state->state & bits) == bits) {
                        total_bytes += min(search_end, state->end) + 1 -
                                       max(cur_start, state->start);
                        if (total_bytes >= max_bytes)
                                break;
                        if (!found) {
                                *start = max(cur_start, state->start);
                                found = 1;
                        }
                        last = state->end;
                } else if (contig && found) {
                        break;
                }
                node = rb_next(node);
                if (!node)
                        break;
        }
out:
        spin_unlock(&tree->lock);
        return total_bytes;
}
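
#if 0   /* illustrative example, not part of the original file */
/*
 * Sketch of a count_range_bits() use: it returns the number of bytes
 * with the given bits set and, via *start, where the first match
 * begins.  This mirrors the probe in clean_io_failure() below, which
 * only asks "is there at least one EXTENT_DIRTY byte?".  The function
 * name is a placeholder.
 */
static int example_failure_tree_empty(struct extent_io_tree *failure_tree)
{
        u64 first = 0;
        u64 bytes;

        /* max_bytes == 1: stop counting as soon as anything is found */
        bytes = count_range_bits(failure_tree, &first, (u64)-1, 1,
                                 EXTENT_DIRTY, 0);
        return bytes == 0;
}
#endif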

/*
 * set the private field for a given byte offset in the tree.  If there isn't
 * an extent_state there already, this does nothing.
 */
int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
{
        struct rb_node *node;
        struct extent_state *state;
        int ret = 0;

        spin_lock(&tree->lock);
        /*
         * this search will find all the extents that end after
         * our range starts.
         */
        node = tree_search(tree, start);
        if (!node) {
                ret = -ENOENT;
                goto out;
        }
        state = rb_entry(node, struct extent_state, rb_node);
        if (state->start != start) {
                ret = -ENOENT;
                goto out;
        }
        state->private = private;
out:
        spin_unlock(&tree->lock);
        return ret;
}

int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
{
        struct rb_node *node;
        struct extent_state *state;
        int ret = 0;

        spin_lock(&tree->lock);
        /*
         * this search will find all the extents that end after
         * our range starts.
         */
        node = tree_search(tree, start);
        if (!node) {
                ret = -ENOENT;
                goto out;
        }
        state = rb_entry(node, struct extent_state, rb_node);
        if (state->start != start) {
                ret = -ENOENT;
                goto out;
        }
        *private = state->private;
out:
        spin_unlock(&tree->lock);
        return ret;
}
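
#if 0   /* illustrative example, not part of the original file */
/*
 * Sketch of the private-field round trip: the field is a bare u64, and
 * the IO failure code below stashes a kernel pointer in it by casting
 * through unsigned long.  Both calls fail with -ENOENT unless an
 * extent_state starts exactly at 'start'.  The function name is a
 * placeholder.
 */
static int example_stash_pointer(struct extent_io_tree *tree, u64 start,
                                 void *rec)
{
        u64 private;
        int ret;

        ret = set_state_private(tree, start, (u64)(unsigned long)rec);
        if (ret)
                return ret;

        ret = get_state_private(tree, start, &private);
        if (ret)
                return ret;

        /* the same pointer comes back out */
        BUG_ON((void *)(unsigned long)private != rec);
        return 0;
}
#endif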

/*
 * searches a range in the state tree for a given mask.
 * If 'filled' == 1, this returns 1 only if every extent in the range
 * has the bits set.  Otherwise, 1 is returned if any bit in the
 * range is found set.
 */
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
                   int bits, int filled, struct extent_state *cached)
{
        struct extent_state *state = NULL;
        struct rb_node *node;
        int bitset = 0;

        spin_lock(&tree->lock);
        if (cached && cached->tree && cached->start <= start &&
            cached->end > start)
                node = &cached->rb_node;
        else
                node = tree_search(tree, start);
        while (node && start <= end) {
                state = rb_entry(node, struct extent_state, rb_node);

                if (filled && state->start > start) {
                        bitset = 0;
                        break;
                }

                if (state->start > end)
                        break;

                if (state->state & bits) {
                        bitset = 1;
                        if (!filled)
                                break;
                } else if (filled) {
                        bitset = 0;
                        break;
                }

                if (state->end == (u64)-1)
                        break;

                start = state->end + 1;
                if (start > end)
                        break;
                node = rb_next(node);
                if (!node) {
                        if (filled)
                                bitset = 0;
                        break;
                }
        }
        spin_unlock(&tree->lock);
        return bitset;
}
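
#if 0   /* illustrative example, not part of the original file */
/*
 * Sketch of the 'filled' flag semantics of test_range_bit(): filled == 1
 * asks "is the whole range covered?", filled == 0 asks "is any byte
 * set?".  Both calls mirror real users in this file (the delalloc
 * re-check above and check_page_locked() below); the function name and
 * range are placeholders.
 */
static void example_test_bits(struct extent_io_tree *tree, u64 start, u64 end)
{
        /* 1 only if every byte in [start, end] is delalloc */
        int all_delalloc = test_range_bit(tree, start, end,
                                          EXTENT_DELALLOC, 1, NULL);

        /* 1 if any byte in [start, end] is still locked */
        int any_locked = test_range_bit(tree, start, end,
                                        EXTENT_LOCKED, 0, NULL);
        (void)all_delalloc;
        (void)any_locked;
}
#endif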

/*
 * helper function to set a given page up to date if all the
 * extents in the tree for that page are up to date
 */
static int check_page_uptodate(struct extent_io_tree *tree,
                               struct page *page)
{
        u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
        u64 end = start + PAGE_CACHE_SIZE - 1;
        if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
                SetPageUptodate(page);
        return 0;
}

/*
 * helper function to unlock a page if all the extents in the tree
 * for that page are unlocked
 */
static int check_page_locked(struct extent_io_tree *tree,
                             struct page *page)
{
        u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
        u64 end = start + PAGE_CACHE_SIZE - 1;
        if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL))
                unlock_page(page);
        return 0;
}

/*
 * helper function to end page writeback if all the extents
 * in the tree for that page are done with writeback
 */
static int check_page_writeback(struct extent_io_tree *tree,
                                struct page *page)
{
        end_page_writeback(page);
        return 0;
}

/*
 * When IO fails, either with EIO or csum verification fails, we
 * try other mirrors that might have a good copy of the data.  This
 * io_failure_record is used to record state as we go through all the
 * mirrors.  If another mirror has good data, the page is set up to date
 * and things continue.  If a good mirror can't be found, the original
 * bio end_io callback is called to indicate things have failed.
 */
struct io_failure_record {
        struct page *page;
        u64 start;
        u64 len;
        u64 logical;
        unsigned long bio_flags;
        int this_mirror;
        int failed_mirror;
        int in_validation;
};

static int free_io_failure(struct inode *inode, struct io_failure_record *rec,
                           int did_repair)
{
        int ret;
        int err = 0;
        struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;

        set_state_private(failure_tree, rec->start, 0);
        ret = clear_extent_bits(failure_tree, rec->start,
                                rec->start + rec->len - 1,
                                EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
        if (ret)
                err = ret;

        if (did_repair) {
                ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start,
                                        rec->start + rec->len - 1,
                                        EXTENT_DAMAGED, GFP_NOFS);
                if (ret && !err)
                        err = ret;
        }

        kfree(rec);
        return err;
}

static void repair_io_failure_callback(struct bio *bio, int err)
{
        complete(bio->bi_private);
}

/*
 * this bypasses the standard btrfs submit functions deliberately, as
 * the standard behavior is to write all copies in a raid setup.  here we only
 * want to write the one bad copy, so we do the mapping ourselves and issue
 * submit_bio directly.
 * to avoid any synchronization issues, wait for the data after writing, which
 * actually prevents the read that triggered the error from finishing.
 * currently, there can be no more than two copies of every data bit.  thus,
 * exactly one rewrite is required.
 */
int repair_io_failure(struct btrfs_mapping_tree *map_tree, u64 start,
                      u64 length, u64 logical, struct page *page,
                      int mirror_num)
{
        struct bio *bio;
        struct btrfs_device *dev;
        DECLARE_COMPLETION_ONSTACK(compl);
        u64 map_length = 0;
        u64 sector;
        struct btrfs_bio *bbio = NULL;
        int ret;

        BUG_ON(!mirror_num);

        bio = bio_alloc(GFP_NOFS, 1);
        if (!bio)
                return -EIO;
        bio->bi_private = &compl;
        bio->bi_end_io = repair_io_failure_callback;
        bio->bi_size = 0;
        map_length = length;

        ret = btrfs_map_block(map_tree, WRITE, logical,
                              &map_length, &bbio, mirror_num);
        if (ret) {
                bio_put(bio);
                return -EIO;
        }
        BUG_ON(mirror_num != bbio->mirror_num);
        sector = bbio->stripes[mirror_num-1].physical >> 9;
        bio->bi_sector = sector;
        dev = bbio->stripes[mirror_num-1].dev;
        kfree(bbio);
        if (!dev || !dev->bdev || !dev->writeable) {
                bio_put(bio);
                return -EIO;
        }
        bio->bi_bdev = dev->bdev;
        bio_add_page(bio, page, length, start - page_offset(page));
        btrfsic_submit_bio(WRITE_SYNC, bio);
        wait_for_completion(&compl);

        if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
                /* try to remap that extent elsewhere? */
                bio_put(bio);
                return -EIO;
        }

        printk(KERN_INFO "btrfs read error corrected: ino %lu off %llu (dev %s "
               "sector %llu)\n", page->mapping->host->i_ino, start,
               dev->name, sector);

        bio_put(bio);
        return 0;
}
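
#if 0   /* illustrative example, not part of the original file */
/*
 * Sketch of a repair_io_failure() call: the caller must already know
 * which mirror is bad and pass its 1-based mirror_num, as
 * clean_io_failure() does below.  The wrapper name and its parameters
 * are placeholders.
 */
static int example_rewrite_bad_mirror(struct btrfs_mapping_tree *map_tree,
                                      struct page *page, u64 file_off,
                                      u64 len, u64 logical, int bad_mirror)
{
        /*
         * Waits for the write to complete before returning, which is
         * what keeps the read that spotted the error from finishing
         * with stale data (see the comment above repair_io_failure).
         */
        return repair_io_failure(map_tree, file_off, len, logical, page,
                                 bad_mirror);
}
#endif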

/*
 * each time an IO finishes, we do a fast check in the IO failure tree
 * to see if we need to process or clean up an io_failure_record
 */
static int clean_io_failure(u64 start, struct page *page)
{
        u64 private;
        u64 private_failure;
        struct io_failure_record *failrec;
        struct btrfs_mapping_tree *map_tree;
        struct extent_state *state;
        int num_copies;
        int did_repair = 0;
        int ret;
        struct inode *inode = page->mapping->host;

        private = 0;
        ret = count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
                               (u64)-1, 1, EXTENT_DIRTY, 0);
        if (!ret)
                return 0;

        ret = get_state_private(&BTRFS_I(inode)->io_failure_tree, start,
                                &private_failure);
        if (ret)
                return 0;

        failrec = (struct io_failure_record *)(unsigned long) private_failure;
        BUG_ON(!failrec->this_mirror);

        if (failrec->in_validation) {
                /* there was no real error, just free the record */
                pr_debug("clean_io_failure: freeing dummy error at %llu\n",
                         failrec->start);
                did_repair = 1;
                goto out;
        }

        spin_lock(&BTRFS_I(inode)->io_tree.lock);
        state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
                                            failrec->start,
                                            EXTENT_LOCKED);
        spin_unlock(&BTRFS_I(inode)->io_tree.lock);

        if (state && state->start == failrec->start) {
                map_tree = &BTRFS_I(inode)->root->fs_info->mapping_tree;
                num_copies = btrfs_num_copies(map_tree, failrec->logical,
                                              failrec->len);
                if (num_copies > 1) {
                        ret = repair_io_failure(map_tree, start, failrec->len,
                                                failrec->logical, page,
                                                failrec->failed_mirror);
                        did_repair = !ret;
                }
        }

out:
        if (!ret)
                ret = free_io_failure(inode, failrec, did_repair);

        return ret;
}

/*
 * this is a generic handler for readpage errors (default
 * readpage_io_failed_hook).  if other copies exist, read those and write back
 * good data to the failed position.  does not attempt to remap the failed
 * extent elsewhere, hoping the device will be smart enough to do this as
 * needed
 */

static int bio_readpage_error(struct bio *failed_bio, struct page *page,
                              u64 start, u64 end, int failed_mirror,
                              struct extent_state *state)
{
        struct io_failure_record *failrec = NULL;
        u64 private;
        struct extent_map *em;
        struct inode *inode = page->mapping->host;
        struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
        struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        struct bio *bio;
        int num_copies;
        int ret;
        int read_mode;
        u64 logical;

        BUG_ON(failed_bio->bi_rw & REQ_WRITE);

        ret = get_state_private(failure_tree, start, &private);
        if (ret) {
                failrec = kzalloc(sizeof(*failrec), GFP_NOFS);
                if (!failrec)
                        return -ENOMEM;
                failrec->start = start;
                failrec->len = end - start + 1;
                failrec->this_mirror = 0;
                failrec->bio_flags = 0;
                failrec->in_validation = 0;

                read_lock(&em_tree->lock);
                em = lookup_extent_mapping(em_tree, start, failrec->len);
                if (!em) {
                        read_unlock(&em_tree->lock);
                        kfree(failrec);
                        return -EIO;
                }

                if (em->start > start || em->start + em->len < start) {
                        free_extent_map(em);
                        em = NULL;
                }
                read_unlock(&em_tree->lock);

                if (!em || IS_ERR(em)) {
                        kfree(failrec);
                        return -EIO;
                }
                logical = start - em->start;
                logical = em->block_start + logical;
                if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
                        logical = em->block_start;
                        failrec->bio_flags = EXTENT_BIO_COMPRESSED;
                        extent_set_compress_type(&failrec->bio_flags,
                                                 em->compress_type);
                }
                pr_debug("bio_readpage_error: (new) logical=%llu, start=%llu, "
                         "len=%llu\n", logical, start, failrec->len);
                failrec->logical = logical;
                free_extent_map(em);

                /* set the bits in the private failure tree */
                ret = set_extent_bits(failure_tree, start, end,
                                        EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
                if (ret >= 0)
                        ret = set_state_private(failure_tree, start,
                                                (u64)(unsigned long)failrec);
                /* set the bits in the inode's tree */
                if (ret >= 0)
                        ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED,
                                                GFP_NOFS);
                if (ret < 0) {
                        kfree(failrec);
                        return ret;
                }
        } else {
                failrec = (struct io_failure_record *)(unsigned long)private;
                pr_debug("bio_readpage_error: (found) logical=%llu, "
                         "start=%llu, len=%llu, validation=%d\n",
                         failrec->logical, failrec->start, failrec->len,
                         failrec->in_validation);
                /*
                 * when data can be on disk more than twice, add to failrec here
                 * (e.g. with a list for failed_mirror) to make
                 * clean_io_failure() clean all those errors at once.
                 */
        }
        num_copies = btrfs_num_copies(
                              &BTRFS_I(inode)->root->fs_info->mapping_tree,
                              failrec->logical, failrec->len);
        if (num_copies == 1) {
                /*
                 * we only have a single copy of the data, so don't bother with
                 * all the retry and error correction code that follows.  no
                 * matter what the error is, it is very likely to persist.
                 */
                pr_debug("bio_readpage_error: cannot repair, num_copies == 1. "
                         "state=%p, num_copies=%d, next_mirror %d, "
                         "failed_mirror %d\n", state, num_copies,
                         failrec->this_mirror, failed_mirror);
                free_io_failure(inode, failrec, 0);
                return -EIO;
        }

        if (!state) {
                spin_lock(&tree->lock);
                state = find_first_extent_bit_state(tree, failrec->start,
                                                    EXTENT_LOCKED);
                if (state && state->start != failrec->start)
                        state = NULL;
                spin_unlock(&tree->lock);
        }

        /*
         * there are two premises:
         * a) deliver good data to the caller
         * b) correct the bad sectors on disk
         */
        if (failed_bio->bi_vcnt > 1) {
                /*
                 * to fulfill b), we need to know the exact failing sectors, as
                 * we don't want to rewrite any more than the failed ones.
                 * thus, we need separate read requests for the failed bio
                 *
                 * if the following BUG_ON triggers, our validation request got
                 * merged.  we need separate requests for our algorithm to work.
                 */
                BUG_ON(failrec->in_validation);
                failrec->in_validation = 1;
                failrec->this_mirror = failed_mirror;
                read_mode = READ_SYNC | REQ_FAILFAST_DEV;
        } else {
                /*
                 * we're ready to fulfill a) and b) at the same time.  get a
                 * good copy of the failed sector and if we succeed, we have
                 * setup everything for repair_io_failure to do the rest for
                 * us.
                 */
                if (failrec->in_validation) {
                        BUG_ON(failrec->this_mirror != failed_mirror);
                        failrec->in_validation = 0;
                        failrec->this_mirror = 0;
                }
                failrec->failed_mirror = failed_mirror;
                failrec->this_mirror++;
                if (failrec->this_mirror == failed_mirror)
                        failrec->this_mirror++;
                read_mode = READ_SYNC;
        }

        if (!state || failrec->this_mirror > num_copies) {
                pr_debug("bio_readpage_error: (fail) state=%p, num_copies=%d, "
                         "next_mirror %d, failed_mirror %d\n", state,
                         num_copies, failrec->this_mirror, failed_mirror);
                free_io_failure(inode, failrec, 0);
                return -EIO;
        }

        bio = bio_alloc(GFP_NOFS, 1);
        bio->bi_private = state;
        bio->bi_end_io = failed_bio->bi_end_io;
        bio->bi_sector = failrec->logical >> 9;
        bio->bi_bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
        bio->bi_size = 0;

        bio_add_page(bio, page, failrec->len, start - page_offset(page));

        pr_debug("bio_readpage_error: submitting new read[%#x] to "
                 "this_mirror=%d, num_copies=%d, in_validation=%d\n", read_mode,
                 failrec->this_mirror, num_copies, failrec->in_validation);

        ret = tree->ops->submit_bio_hook(inode, read_mode, bio,
                                         failrec->this_mirror,
                                         failrec->bio_flags, 0);
        return ret;
}

/* lots and lots of room for performance fixes in the end_bio funcs */

int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
{
        int uptodate = (err == 0);
        struct extent_io_tree *tree;
        int ret;

        tree = &BTRFS_I(page->mapping->host)->io_tree;

        if (tree->ops && tree->ops->writepage_end_io_hook) {
                ret = tree->ops->writepage_end_io_hook(page, start,
                                               end, NULL, uptodate);
                if (ret)
                        uptodate = 0;
        }

        if (!uptodate && tree->ops &&
            tree->ops->writepage_io_failed_hook) {
                ret = tree->ops->writepage_io_failed_hook(NULL, page,
                                                         start, end, NULL);
                /* Writeback already completed */
                if (ret == 0)
                        return 1;
        }

        if (!uptodate) {
                clear_extent_uptodate(tree, start, end, NULL, GFP_NOFS);
                ClearPageUptodate(page);
                SetPageError(page);
        }
        return 0;
}

/*
 * after a writepage IO is done, we need to:
 * clear the uptodate bits on error
 * clear the writeback bits in the extent tree for this IO
 * end_page_writeback if the page has no more pending IO
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
static void end_bio_extent_writepage(struct bio *bio, int err)
{
        struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
        struct extent_io_tree *tree;
        u64 start;
        u64 end;
        int whole_page;

        do {
                struct page *page = bvec->bv_page;
                tree = &BTRFS_I(page->mapping->host)->io_tree;

                start = ((u64)page->index << PAGE_CACHE_SHIFT) +
                         bvec->bv_offset;
                end = start + bvec->bv_len - 1;

                if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
                        whole_page = 1;
                else
                        whole_page = 0;

                if (--bvec >= bio->bi_io_vec)
                        prefetchw(&bvec->bv_page->flags);

                if (end_extent_writepage(page, err, start, end))
                        continue;

                if (whole_page)
                        end_page_writeback(page);
                else
                        check_page_writeback(tree, page);
        } while (bvec >= bio->bi_io_vec);

        bio_put(bio);
}

/*
 * after a readpage IO is done, we need to:
 * clear the uptodate bits on error
 * set the uptodate bits if things worked
 * set the page up to date if all extents in the tree are uptodate
 * clear the lock bit in the extent tree
 * unlock the page if there are no other extents locked for it
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
static void end_bio_extent_readpage(struct bio *bio, int err)
{
        int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
        struct bio_vec *bvec = bio->bi_io_vec;
        struct extent_io_tree *tree;
        u64 start;
        u64 end;
        int whole_page;
        int ret;

        if (err)
                uptodate = 0;

        do {
                struct page *page = bvec->bv_page;
                struct extent_state *cached = NULL;
                struct extent_state *state;

                pr_debug("end_bio_extent_readpage: bi_vcnt=%d, idx=%d, err=%d, "
                         "mirror=%ld\n", bio->bi_vcnt, bio->bi_idx, err,
                         (long int)bio->bi_bdev);
                tree = &BTRFS_I(page->mapping->host)->io_tree;

                start = ((u64)page->index << PAGE_CACHE_SHIFT) +
                        bvec->bv_offset;
                end = start + bvec->bv_len - 1;

                if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
                        whole_page = 1;
                else
                        whole_page = 0;

                if (++bvec <= bvec_end)
                        prefetchw(&bvec->bv_page->flags);

                spin_lock(&tree->lock);
                state = find_first_extent_bit_state(tree, start, EXTENT_LOCKED);
                if (state && state->start == start) {
                        /*
                         * take a reference on the state, unlock will drop
                         * the ref
                         */
                        cache_state(state, &cached);
                }
                spin_unlock(&tree->lock);

                if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
                        ret = tree->ops->readpage_end_io_hook(page, start, end,
                                                              state);
                        if (ret)
                                uptodate = 0;
                        else
                                clean_io_failure(start, page);
                }
                if (!uptodate) {
                        int failed_mirror;
                        failed_mirror = (int)(unsigned long)bio->bi_bdev;
                        /*
                         * The generic bio_readpage_error handles errors the
                         * following way: If possible, new read requests are
                         * created and submitted and will end up in
                         * end_bio_extent_readpage as well (if we're lucky, not
                         * in the !uptodate case).  In that case it returns 0 and
                         * we just go on with the next page in our bio.  If it
                         * can't handle the error it will return -EIO and we
                         * remain responsible for that page.
                         */
                        ret = bio_readpage_error(bio, page, start, end,
                                                 failed_mirror, NULL);
                        if (ret == 0) {
error_handled:
                                uptodate =
                                        test_bit(BIO_UPTODATE, &bio->bi_flags);
                                if (err)
                                        uptodate = 0;
                                uncache_state(&cached);
                                continue;
                        }
                        if (tree->ops && tree->ops->readpage_io_failed_hook) {
                                ret = tree->ops->readpage_io_failed_hook(
                                                        bio, page, start, end,
                                                        failed_mirror, state);
                                if (ret == 0)
                                        goto error_handled;
                        }
                }

                if (uptodate) {
                        set_extent_uptodate(tree, start, end, &cached,
                                            GFP_ATOMIC);
                }
                unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);

                if (whole_page) {
                        if (uptodate) {
                                SetPageUptodate(page);
                        } else {
                                ClearPageUptodate(page);
                                SetPageError(page);
                        }
                        unlock_page(page);
                } else {
                        if (uptodate) {
                                check_page_uptodate(tree, page);
                        } else {
                                ClearPageUptodate(page);
                                SetPageError(page);
                        }
                        check_page_locked(tree, page);
                }
        } while (bvec <= bvec_end);

        bio_put(bio);
}

struct bio *
btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
                gfp_t gfp_flags)
{
        struct bio *bio;

        bio = bio_alloc(gfp_flags, nr_vecs);

        if (bio == NULL && (current->flags & PF_MEMALLOC)) {
                while (!bio && (nr_vecs /= 2))
                        bio = bio_alloc(gfp_flags, nr_vecs);
        }

        if (bio) {
                bio->bi_size = 0;
                bio->bi_bdev = bdev;
                bio->bi_sector = first_sector;
        }
        return bio;
}
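
#if 0   /* illustrative example, not part of the original file */
/*
 * Sketch of the btrfs_bio_alloc() fallback behavior: for PF_MEMALLOC
 * tasks it keeps halving nr_vecs instead of failing outright, so a
 * caller must still handle NULL but can otherwise treat a smaller bio
 * as success.  The wrapper name is a placeholder.
 */
static struct bio *example_alloc_bio(struct block_device *bdev,
                                     u64 first_sector, int nr)
{
        struct bio *bio;

        bio = btrfs_bio_alloc(bdev, first_sector, nr,
                              GFP_NOFS | __GFP_HIGH);
        if (!bio)
                return NULL;    /* even the smallest fallback failed */

        /* bi_sector, bi_bdev and a zeroed bi_size are already set */
        return bio;
}
#endif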

static int submit_one_bio(int rw, struct bio *bio, int mirror_num,
                          unsigned long bio_flags)
{
        int ret = 0;
        struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
        struct page *page = bvec->bv_page;
        struct extent_io_tree *tree = bio->bi_private;
        u64 start;

        start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;

        bio->bi_private = NULL;

        bio_get(bio);

        if (tree->ops && tree->ops->submit_bio_hook)
                ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
                                           mirror_num, bio_flags, start);
        else
                btrfsic_submit_bio(rw, bio);

        if (bio_flagged(bio, BIO_EOPNOTSUPP))
                ret = -EOPNOTSUPP;
        bio_put(bio);
        return ret;
}

static int submit_extent_page(int rw, struct extent_io_tree *tree,
                              struct page *page, sector_t sector,
                              size_t size, unsigned long offset,
                              struct block_device *bdev,
                              struct bio **bio_ret,
                              unsigned long max_pages,
                              bio_end_io_t end_io_func,
                              int mirror_num,
                              unsigned long prev_bio_flags,
                              unsigned long bio_flags)
{
        int ret = 0;
        struct bio *bio;
        int nr;
        int contig = 0;
        int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED;
        int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
        size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE);

        if (bio_ret && *bio_ret) {
                bio = *bio_ret;
                if (old_compressed)
                        contig = bio->bi_sector == sector;
                else
                        contig = bio->bi_sector + (bio->bi_size >> 9) ==
                                sector;

                if (prev_bio_flags != bio_flags || !contig ||
                    (tree->ops && tree->ops->merge_bio_hook &&
                     tree->ops->merge_bio_hook(page, offset, page_size, bio,
                                               bio_flags)) ||
                    bio_add_page(bio, page, page_size, offset) < page_size) {
                        ret = submit_one_bio(rw, bio, mirror_num,
                                             prev_bio_flags);
                        bio = NULL;
                } else {
                        return 0;
                }
        }
        if (this_compressed)
                nr = BIO_MAX_PAGES;
        else
                nr = bio_get_nr_vecs(bdev);

        bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
        if (!bio)
                return -ENOMEM;

        bio_add_page(bio, page, page_size, offset);
        bio->bi_end_io = end_io_func;
        bio->bi_private = tree;

        if (bio_ret)
                *bio_ret = bio;
        else
                ret = submit_one_bio(rw, bio, mirror_num, bio_flags);

        return ret;
}
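
#if 0   /* illustrative example, not part of the original file */
/*
 * Sketch of the *bio_ret cursor contract of submit_extent_page():
 * callers batch contiguous pages into one bio across calls by passing
 * the same bio pointer for every page, then submit whatever is left at
 * the end, as extent_read_full_page() does below.  The function name,
 * max_pages value and range are placeholders.
 */
static void example_batched_submit(struct extent_io_tree *tree,
                                   struct page *page, sector_t sector,
                                   struct block_device *bdev)
{
        struct bio *bio = NULL;
        unsigned long bio_flags = 0;

        /* queues the page; only submits early if it can't merge */
        submit_extent_page(READ, tree, page, sector, PAGE_CACHE_SIZE, 0,
                           bdev, &bio, 1, end_bio_extent_readpage, 0,
                           bio_flags, bio_flags);

        /* flush the partially built bio once the loop is done */
        if (bio)
                submit_one_bio(READ, bio, 0, bio_flags);
}
#endif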

void set_page_extent_mapped(struct page *page)
{
        if (!PagePrivate(page)) {
                SetPagePrivate(page);
                page_cache_get(page);
                set_page_private(page, EXTENT_PAGE_PRIVATE);
        }
}

static void set_page_extent_head(struct page *page, unsigned long len)
{
        WARN_ON(!PagePrivate(page));
        set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
}

/*
 * basic readpage implementation.  Locked extent state structs are inserted
 * into the tree and are removed when the IO is done (by the end_io
 * handlers)
 */
static int __extent_read_full_page(struct extent_io_tree *tree,
                                   struct page *page,
                                   get_extent_t *get_extent,
                                   struct bio **bio, int mirror_num,
                                   unsigned long *bio_flags)
{
        struct inode *inode = page->mapping->host;
        u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
        u64 page_end = start + PAGE_CACHE_SIZE - 1;
        u64 end;
        u64 cur = start;
        u64 extent_offset;
        u64 last_byte = i_size_read(inode);
        u64 block_start;
        u64 cur_end;
        sector_t sector;
        struct extent_map *em;
        struct block_device *bdev;
        struct btrfs_ordered_extent *ordered;
        int ret;
        int nr = 0;
        size_t pg_offset = 0;
        size_t iosize;
        size_t disk_io_size;
        size_t blocksize = inode->i_sb->s_blocksize;
        unsigned long this_bio_flag = 0;

        set_page_extent_mapped(page);

        if (!PageUptodate(page)) {
                if (cleancache_get_page(page) == 0) {
                        BUG_ON(blocksize != PAGE_SIZE);
                        goto out;
                }
        }

        end = page_end;
        while (1) {
                lock_extent(tree, start, end, GFP_NOFS);
                ordered = btrfs_lookup_ordered_extent(inode, start);
                if (!ordered)
                        break;
                unlock_extent(tree, start, end, GFP_NOFS);
                btrfs_start_ordered_extent(inode, ordered, 1);
                btrfs_put_ordered_extent(ordered);
        }

        if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
                char *userpage;
                size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);

                if (zero_offset) {
                        iosize = PAGE_CACHE_SIZE - zero_offset;
                        userpage = kmap_atomic(page, KM_USER0);
                        memset(userpage + zero_offset, 0, iosize);
                        flush_dcache_page(page);
                        kunmap_atomic(userpage, KM_USER0);
                }
        }
        while (cur <= end) {
                if (cur >= last_byte) {
                        char *userpage;
                        struct extent_state *cached = NULL;

                        iosize = PAGE_CACHE_SIZE - pg_offset;
                        userpage = kmap_atomic(page, KM_USER0);
                        memset(userpage + pg_offset, 0, iosize);
                        flush_dcache_page(page);
                        kunmap_atomic(userpage, KM_USER0);
                        set_extent_uptodate(tree, cur, cur + iosize - 1,
                                            &cached, GFP_NOFS);
                        unlock_extent_cached(tree, cur, cur + iosize - 1,
                                             &cached, GFP_NOFS);
                        break;
                }
                em = get_extent(inode, page, pg_offset, cur,
                                end - cur + 1, 0);
                if (IS_ERR_OR_NULL(em)) {
                        SetPageError(page);
                        unlock_extent(tree, cur, end, GFP_NOFS);
                        break;
                }
                extent_offset = cur - em->start;
                BUG_ON(extent_map_end(em) <= cur);
                BUG_ON(end < cur);

                if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
                        this_bio_flag = EXTENT_BIO_COMPRESSED;
                        extent_set_compress_type(&this_bio_flag,
                                                 em->compress_type);
                }

                iosize = min(extent_map_end(em) - cur, end - cur + 1);
                cur_end = min(extent_map_end(em) - 1, end);
                iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
                if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
                        disk_io_size = em->block_len;
                        sector = em->block_start >> 9;
                } else {
                        sector = (em->block_start + extent_offset) >> 9;
                        disk_io_size = iosize;
                }
                bdev = em->bdev;
                block_start = em->block_start;
                if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
                        block_start = EXTENT_MAP_HOLE;
                free_extent_map(em);
                em = NULL;

                /* we've found a hole, just zero and go on */
                if (block_start == EXTENT_MAP_HOLE) {
                        char *userpage;
                        struct extent_state *cached = NULL;

                        userpage = kmap_atomic(page, KM_USER0);
                        memset(userpage + pg_offset, 0, iosize);
                        flush_dcache_page(page);
                        kunmap_atomic(userpage, KM_USER0);

                        set_extent_uptodate(tree, cur, cur + iosize - 1,
                                            &cached, GFP_NOFS);
                        unlock_extent_cached(tree, cur, cur + iosize - 1,
                                             &cached, GFP_NOFS);
                        cur = cur + iosize;
                        pg_offset += iosize;
                        continue;
                }
                /* the get_extent function already copied into the page */
                if (test_range_bit(tree, cur, cur_end,
                                   EXTENT_UPTODATE, 1, NULL)) {
                        check_page_uptodate(tree, page);
                        unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
                        cur = cur + iosize;
                        pg_offset += iosize;
                        continue;
                }
                /* we have an inline extent but it didn't get marked up
                 * to date.  Error out
                 */
                if (block_start == EXTENT_MAP_INLINE) {
                        SetPageError(page);
                        unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
                        cur = cur + iosize;
                        pg_offset += iosize;
                        continue;
                }

                ret = 0;
                if (tree->ops && tree->ops->readpage_io_hook) {
                        ret = tree->ops->readpage_io_hook(page, cur,
                                                          cur + iosize - 1);
                }
                if (!ret) {
                        unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
                        pnr -= page->index;
                        ret = submit_extent_page(READ, tree, page,
                                         sector, disk_io_size, pg_offset,
                                         bdev, bio, pnr,
                                         end_bio_extent_readpage, mirror_num,
                                         *bio_flags,
                                         this_bio_flag);
                        nr++;
                        *bio_flags = this_bio_flag;
                }
                if (ret)
                        SetPageError(page);
                cur = cur + iosize;
                pg_offset += iosize;
        }
out:
        if (!nr) {
                if (!PageError(page))
                        SetPageUptodate(page);
                unlock_page(page);
        }
        return 0;
}

int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
                          get_extent_t *get_extent, int mirror_num)
{
        struct bio *bio = NULL;
        unsigned long bio_flags = 0;
        int ret;

        ret = __extent_read_full_page(tree, page, get_extent, &bio, mirror_num,
                                      &bio_flags);
        if (bio)
                ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
        return ret;
}

static noinline void update_nr_written(struct page *page,
                                       struct writeback_control *wbc,
                                       unsigned long nr_written)
{
        wbc->nr_to_write -= nr_written;
        if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
            wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
                page->mapping->writeback_index = page->index + nr_written;
}

/*
 * the writepage semantics are similar to regular writepage.  extent
 * records are inserted to lock ranges in the tree, and as dirty areas
 * are found, they are marked writeback.  Then the lock bits are removed
 * and the end_io handler clears the writeback ranges
 */
static int __extent_writepage(struct page *page, struct writeback_control *wbc,
                              void *data)
{
        struct inode *inode = page->mapping->host;
        struct extent_page_data *epd = data;
        struct extent_io_tree *tree = epd->tree;
        u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
        u64 delalloc_start;
        u64 page_end = start + PAGE_CACHE_SIZE - 1;
        u64 end;
        u64 cur = start;
        u64 extent_offset;
        u64 last_byte = i_size_read(inode);
        u64 block_start;
        u64 iosize;
        sector_t sector;
        struct extent_state *cached_state = NULL;
        struct extent_map *em;
        struct block_device *bdev;
        int ret;
        int nr = 0;
        size_t pg_offset = 0;
        size_t blocksize;
        loff_t i_size = i_size_read(inode);
        unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
        u64 nr_delalloc;
        u64 delalloc_end;
        int page_started;
        int compressed;
        int write_flags;
        unsigned long nr_written = 0;
        bool fill_delalloc = true;

        if (wbc->sync_mode == WB_SYNC_ALL)
                write_flags = WRITE_SYNC;
        else
                write_flags = WRITE;

        trace___extent_writepage(page, inode, wbc);

        WARN_ON(!PageLocked(page));

        ClearPageError(page);

        pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
        if (page->index > end_index ||
           (page->index == end_index && !pg_offset)) {
                page->mapping->a_ops->invalidatepage(page, 0);
                unlock_page(page);
                return 0;
        }

        if (page->index == end_index) {
                char *userpage;

                userpage = kmap_atomic(page, KM_USER0);
                memset(userpage + pg_offset, 0,
                       PAGE_CACHE_SIZE - pg_offset);
                kunmap_atomic(userpage, KM_USER0);
                flush_dcache_page(page);
        }
        pg_offset = 0;

        set_page_extent_mapped(page);

        if (!tree->ops || !tree->ops->fill_delalloc)
                fill_delalloc = false;

        delalloc_start = start;
        delalloc_end = 0;
        page_started = 0;
        if (!epd->extent_locked && fill_delalloc) {
                u64 delalloc_to_write = 0;
                /*
                 * make sure the wbc mapping index is at least updated
                 * to this page.
                 */
                update_nr_written(page, wbc, 0);

                while (delalloc_end < page_end) {
                        nr_delalloc = find_lock_delalloc_range(inode, tree,
                                                       page,
                                                       &delalloc_start,
                                                       &delalloc_end,
                                                       128 * 1024 * 1024);
                        if (nr_delalloc == 0) {
                                delalloc_start = delalloc_end + 1;
                                continue;
                        }
                        ret = tree->ops->fill_delalloc(inode, page,
                                                       delalloc_start,
                                                       delalloc_end,
                                                       &page_started,
                                                       &nr_written);
                        BUG_ON(ret);
                        /*
                         * delalloc_end is already one less than the total
                         * length, so we don't subtract one from
                         * PAGE_CACHE_SIZE
                         */
                        delalloc_to_write += (delalloc_end - delalloc_start +
                                              PAGE_CACHE_SIZE) >>
                                              PAGE_CACHE_SHIFT;
                        delalloc_start = delalloc_end + 1;
                }
                if (wbc->nr_to_write < delalloc_to_write) {
                        int thresh = 8192;

                        if (delalloc_to_write < thresh * 2)
                                thresh = delalloc_to_write;
                        wbc->nr_to_write = min_t(u64, delalloc_to_write,
                                                 thresh);
                }

                /* did the fill delalloc function already unlock and start
                 * the IO?
                 */
                if (page_started) {
                        ret = 0;
                        /*
                         * we've unlocked the page, so we can't update
                         * the mapping's writeback index, just update
                         * nr_to_write.
                         */
                        wbc->nr_to_write -= nr_written;
                        goto done_unlocked;
                }
        }
        if (tree->ops && tree->ops->writepage_start_hook) {
                ret = tree->ops->writepage_start_hook(page, start,
                                                      page_end);
                if (ret) {
                        /* Fixup worker will requeue */
                        if (ret == -EBUSY)
                                wbc->pages_skipped++;
                        else
                                redirty_page_for_writepage(wbc, page);
                        update_nr_written(page, wbc, nr_written);
                        unlock_page(page);
                        ret = 0;
                        goto done_unlocked;
                }
        }

        /*
         * we don't want to touch the inode after unlocking the page,
         * so we update the mapping writeback index now
         */
        update_nr_written(page, wbc, nr_written + 1);

        end = page_end;
        if (last_byte <= start) {
                if (tree->ops && tree->ops->writepage_end_io_hook)
                        tree->ops->writepage_end_io_hook(page, start,
                                                         page_end, NULL, 1);
                goto done;
        }

        blocksize = inode->i_sb->s_blocksize;

        while (cur <= end) {
                if (cur >= last_byte) {
                        if (tree->ops && tree->ops->writepage_end_io_hook)
                                tree->ops->writepage_end_io_hook(page, cur,
                                                         page_end, NULL, 1);
                        break;
                }
                em = epd->get_extent(inode, page, pg_offset, cur,
                                     end - cur + 1, 1);
                if (IS_ERR_OR_NULL(em)) {
                        SetPageError(page);
                        break;
                }

                extent_offset = cur - em->start;
                BUG_ON(extent_map_end(em) <= cur);
                BUG_ON(end < cur);
                iosize = min(extent_map_end(em) - cur, end - cur + 1);
                iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
                sector = (em->block_start + extent_offset) >> 9;
                bdev = em->bdev;
                block_start = em->block_start;
                compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
                free_extent_map(em);
                em = NULL;

                /*
2897 * compressed and inline extents are written through other
2898 * paths in the FS
2899 */
2900 if (compressed || block_start == EXTENT_MAP_HOLE ||
Chris Masond1310b22008-01-24 16:13:08 -05002901 block_start == EXTENT_MAP_INLINE) {
Chris Masonc8b97812008-10-29 14:49:59 -04002902 /*
2903 * end_io notification does not happen here for
2904 * compressed extents
2905 */
2906 if (!compressed && tree->ops &&
2907 tree->ops->writepage_end_io_hook)
Chris Masone6dcd2d2008-07-17 12:53:50 -04002908 tree->ops->writepage_end_io_hook(page, cur,
2909 cur + iosize - 1,
2910 NULL, 1);
Chris Masonc8b97812008-10-29 14:49:59 -04002911 else if (compressed) {
2912 /* we don't want to end_page_writeback on
2913 * a compressed extent. this happens
2914 * elsewhere
2915 */
2916 nr++;
2917 }
2918
2919 cur += iosize;
Chris Mason7f3c74f2008-07-18 12:01:11 -04002920 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05002921 continue;
2922 }
Chris Masond1310b22008-01-24 16:13:08 -05002923 /* leave this out until we have a page_mkwrite call */
2924 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
Chris Mason9655d292009-09-02 15:22:30 -04002925 EXTENT_DIRTY, 0, NULL)) {
Chris Masond1310b22008-01-24 16:13:08 -05002926 cur = cur + iosize;
Chris Mason7f3c74f2008-07-18 12:01:11 -04002927 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05002928 continue;
2929 }
Chris Masonc8b97812008-10-29 14:49:59 -04002930
Chris Masond1310b22008-01-24 16:13:08 -05002931 if (tree->ops && tree->ops->writepage_io_hook) {
2932 ret = tree->ops->writepage_io_hook(page, cur,
2933 cur + iosize - 1);
2934 } else {
2935 ret = 0;
2936 }
Chris Mason1259ab72008-05-12 13:39:03 -04002937 if (ret) {
Chris Masond1310b22008-01-24 16:13:08 -05002938 SetPageError(page);
Chris Mason1259ab72008-05-12 13:39:03 -04002939 } else {
Chris Masond1310b22008-01-24 16:13:08 -05002940 unsigned long max_nr = end_index + 1;
Chris Mason7f3c74f2008-07-18 12:01:11 -04002941
Chris Masond1310b22008-01-24 16:13:08 -05002942 set_range_writeback(tree, cur, cur + iosize - 1);
2943 if (!PageWriteback(page)) {
Chris Masond3977122009-01-05 21:25:51 -05002944 printk(KERN_ERR "btrfs warning page %lu not "
2945 "writeback, cur %llu end %llu\n",
2946 page->index, (unsigned long long)cur,
Chris Masond1310b22008-01-24 16:13:08 -05002947 (unsigned long long)end);
2948 }
2949
Chris Masonffbd5172009-04-20 15:50:09 -04002950 ret = submit_extent_page(write_flags, tree, page,
2951 sector, iosize, pg_offset,
2952 bdev, &epd->bio, max_nr,
Chris Masonc8b97812008-10-29 14:49:59 -04002953 end_bio_extent_writepage,
2954 0, 0, 0);
Chris Masond1310b22008-01-24 16:13:08 -05002955 if (ret)
2956 SetPageError(page);
2957 }
2958 cur = cur + iosize;
Chris Mason7f3c74f2008-07-18 12:01:11 -04002959 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05002960 nr++;
2961 }
2962done:
2963 if (nr == 0) {
2964 /* make sure the mapping tag for page dirty gets cleared */
2965 set_page_writeback(page);
2966 end_page_writeback(page);
2967 }
Chris Masond1310b22008-01-24 16:13:08 -05002968 unlock_page(page);
Chris Mason771ed682008-11-06 22:02:51 -05002969
Chris Mason11c83492009-04-20 15:50:09 -04002970done_unlocked:
2971
Chris Mason2c64c532009-09-02 15:04:12 -04002972 /* drop our reference on any cached states */
2973 free_extent_state(cached_state);
Chris Masond1310b22008-01-24 16:13:08 -05002974 return 0;
2975}
2976
Chris Masond1310b22008-01-24 16:13:08 -05002977/**
Chris Mason4bef0842008-09-08 11:18:08 -04002978 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
Chris Masond1310b22008-01-24 16:13:08 -05002979 * @mapping: address space structure to write
2980 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2981 * @writepage: function called for each page
2982 * @data: data passed to writepage function
2983 *
2984 * If a page is already under I/O, write_cache_pages() skips it, even
2985 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
2986 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
2987 * and msync() need to guarantee that all the data which was dirty at the time
2988 * the call was made get new I/O started against them. If wbc->sync_mode is
2989 * WB_SYNC_ALL then we were called for data integrity and we must wait for
2990 * existing IO to complete.
2991 */
Christoph Hellwigb2950862008-12-02 09:54:17 -05002992static int extent_write_cache_pages(struct extent_io_tree *tree,
Chris Mason4bef0842008-09-08 11:18:08 -04002993 struct address_space *mapping,
2994 struct writeback_control *wbc,
Chris Masond2c3f4f2008-11-19 12:44:22 -05002995 writepage_t writepage, void *data,
2996 void (*flush_fn)(void *))
Chris Masond1310b22008-01-24 16:13:08 -05002997{
Chris Masond1310b22008-01-24 16:13:08 -05002998 int ret = 0;
2999 int done = 0;
Chris Masonf85d7d6c2009-09-18 16:03:16 -04003000 int nr_to_write_done = 0;
Chris Masond1310b22008-01-24 16:13:08 -05003001 struct pagevec pvec;
3002 int nr_pages;
3003 pgoff_t index;
3004 pgoff_t end; /* Inclusive */
3005 int scanned = 0;
Josef Bacikf7aaa062011-07-15 21:26:38 +00003006 int tag;
Chris Masond1310b22008-01-24 16:13:08 -05003007
Chris Masond1310b22008-01-24 16:13:08 -05003008 pagevec_init(&pvec, 0);
3009 if (wbc->range_cyclic) {
3010 index = mapping->writeback_index; /* Start from prev offset */
3011 end = -1;
3012 } else {
3013 index = wbc->range_start >> PAGE_CACHE_SHIFT;
3014 end = wbc->range_end >> PAGE_CACHE_SHIFT;
Chris Masond1310b22008-01-24 16:13:08 -05003015 scanned = 1;
3016 }
Josef Bacikf7aaa062011-07-15 21:26:38 +00003017 if (wbc->sync_mode == WB_SYNC_ALL)
3018 tag = PAGECACHE_TAG_TOWRITE;
3019 else
3020 tag = PAGECACHE_TAG_DIRTY;
Chris Masond1310b22008-01-24 16:13:08 -05003021retry:
Josef Bacikf7aaa062011-07-15 21:26:38 +00003022 if (wbc->sync_mode == WB_SYNC_ALL)
3023 tag_pages_for_writeback(mapping, index, end);
Chris Masonf85d7d6c2009-09-18 16:03:16 -04003024 while (!done && !nr_to_write_done && (index <= end) &&
Josef Bacikf7aaa062011-07-15 21:26:38 +00003025 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
3026 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
Chris Masond1310b22008-01-24 16:13:08 -05003027 unsigned i;
3028
3029 scanned = 1;
3030 for (i = 0; i < nr_pages; i++) {
3031 struct page *page = pvec.pages[i];
3032
3033 /*
3034 * At this point we hold neither mapping->tree_lock nor
3035 * lock on the page itself: the page may be truncated or
3036 * invalidated (changing page->mapping to NULL), or even
3037 * swizzled back from swapper_space to tmpfs file
3038 * mapping
3039 */
Chris Mason01d658f2011-11-01 10:08:06 -04003040 if (tree->ops &&
3041 tree->ops->write_cache_pages_lock_hook) {
3042 tree->ops->write_cache_pages_lock_hook(page,
3043 data, flush_fn);
3044 } else {
3045 if (!trylock_page(page)) {
3046 flush_fn(data);
3047 lock_page(page);
3048 }
3049 }
Chris Masond1310b22008-01-24 16:13:08 -05003050
3051 if (unlikely(page->mapping != mapping)) {
3052 unlock_page(page);
3053 continue;
3054 }
3055
3056 if (!wbc->range_cyclic && page->index > end) {
3057 done = 1;
3058 unlock_page(page);
3059 continue;
3060 }
3061
Chris Masond2c3f4f2008-11-19 12:44:22 -05003062 if (wbc->sync_mode != WB_SYNC_NONE) {
Chris Mason0e6bd952008-11-20 10:46:35 -05003063 if (PageWriteback(page))
3064 flush_fn(data);
Chris Masond1310b22008-01-24 16:13:08 -05003065 wait_on_page_writeback(page);
Chris Masond2c3f4f2008-11-19 12:44:22 -05003066 }
Chris Masond1310b22008-01-24 16:13:08 -05003067
3068 if (PageWriteback(page) ||
3069 !clear_page_dirty_for_io(page)) {
3070 unlock_page(page);
3071 continue;
3072 }
3073
3074 ret = (*writepage)(page, wbc, data);
3075
3076 if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
3077 unlock_page(page);
3078 ret = 0;
3079 }
Chris Masonf85d7d6c2009-09-18 16:03:16 -04003080 if (ret)
Chris Masond1310b22008-01-24 16:13:08 -05003081 done = 1;
Chris Masonf85d7d6c2009-09-18 16:03:16 -04003082
3083 /*
3084 * the filesystem may choose to bump up nr_to_write.
3085 * We have to make sure to honor the new nr_to_write
3086 * at any time
3087 */
3088 nr_to_write_done = wbc->nr_to_write <= 0;
Chris Masond1310b22008-01-24 16:13:08 -05003089 }
3090 pagevec_release(&pvec);
3091 cond_resched();
3092 }
3093 if (!scanned && !done) {
3094 /*
3095 * We hit the last page and there is more work to be done: wrap
3096 * back to the start of the file
3097 */
3098 scanned = 1;
3099 index = 0;
3100 goto retry;
3101 }
Chris Masond1310b22008-01-24 16:13:08 -05003102 return ret;
3103}
Chris Masond1310b22008-01-24 16:13:08 -05003104
Chris Masonffbd5172009-04-20 15:50:09 -04003105static void flush_epd_write_bio(struct extent_page_data *epd)
3106{
3107 if (epd->bio) {
3108 if (epd->sync_io)
3109 submit_one_bio(WRITE_SYNC, epd->bio, 0, 0);
3110 else
3111 submit_one_bio(WRITE, epd->bio, 0, 0);
3112 epd->bio = NULL;
3113 }
3114}
3115
Chris Masond2c3f4f2008-11-19 12:44:22 -05003116static noinline void flush_write_bio(void *data)
3117{
3118 struct extent_page_data *epd = data;
Chris Masonffbd5172009-04-20 15:50:09 -04003119 flush_epd_write_bio(epd);
Chris Masond2c3f4f2008-11-19 12:44:22 -05003120}
3121
Chris Masond1310b22008-01-24 16:13:08 -05003122int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
3123 get_extent_t *get_extent,
3124 struct writeback_control *wbc)
3125{
3126 int ret;
Chris Masond1310b22008-01-24 16:13:08 -05003127 struct extent_page_data epd = {
3128 .bio = NULL,
3129 .tree = tree,
3130 .get_extent = get_extent,
Chris Mason771ed682008-11-06 22:02:51 -05003131 .extent_locked = 0,
Chris Masonffbd5172009-04-20 15:50:09 -04003132 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
Chris Masond1310b22008-01-24 16:13:08 -05003133 };
Chris Masond1310b22008-01-24 16:13:08 -05003134
Chris Masond1310b22008-01-24 16:13:08 -05003135 ret = __extent_writepage(page, wbc, &epd);
3136
Chris Masonffbd5172009-04-20 15:50:09 -04003137 flush_epd_write_bio(&epd);
Chris Masond1310b22008-01-24 16:13:08 -05003138 return ret;
3139}
Chris Masond1310b22008-01-24 16:13:08 -05003140
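/*
 * Illustrative sketch (not part of the original file): a minimal
 * ->writepage style caller for a single dirty page.  The io_tree lookup
 * via BTRFS_I() and the btrfs_get_extent callback are assumptions for
 * illustration; any get_extent_t implementation would do.
 */
static int example_write_one_page(struct page *page,
				  struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;

	/* writes the page and flushes any bio built up along the way */
	return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
}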
Chris Mason771ed682008-11-06 22:02:51 -05003141int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
3142 u64 start, u64 end, get_extent_t *get_extent,
3143 int mode)
3144{
3145 int ret = 0;
3146 struct address_space *mapping = inode->i_mapping;
3147 struct page *page;
3148 unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >>
3149 PAGE_CACHE_SHIFT;
3150
3151 struct extent_page_data epd = {
3152 .bio = NULL,
3153 .tree = tree,
3154 .get_extent = get_extent,
3155 .extent_locked = 1,
Chris Masonffbd5172009-04-20 15:50:09 -04003156 .sync_io = mode == WB_SYNC_ALL,
Chris Mason771ed682008-11-06 22:02:51 -05003157 };
3158 struct writeback_control wbc_writepages = {
Chris Mason771ed682008-11-06 22:02:51 -05003159 .sync_mode = mode,
Chris Mason771ed682008-11-06 22:02:51 -05003160 .nr_to_write = nr_pages * 2,
3161 .range_start = start,
3162 .range_end = end + 1,
3163 };
3164
Chris Masond3977122009-01-05 21:25:51 -05003165 while (start <= end) {
Chris Mason771ed682008-11-06 22:02:51 -05003166 page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
3167 if (clear_page_dirty_for_io(page))
3168 ret = __extent_writepage(page, &wbc_writepages, &epd);
3169 else {
3170 if (tree->ops && tree->ops->writepage_end_io_hook)
3171 tree->ops->writepage_end_io_hook(page, start,
3172 start + PAGE_CACHE_SIZE - 1,
3173 NULL, 1);
3174 unlock_page(page);
3175 }
3176 page_cache_release(page);
3177 start += PAGE_CACHE_SIZE;
3178 }
3179
Chris Masonffbd5172009-04-20 15:50:09 -04003180 flush_epd_write_bio(&epd);
Chris Mason771ed682008-11-06 22:02:51 -05003181 return ret;
3182}
Chris Masond1310b22008-01-24 16:13:08 -05003183
3184int extent_writepages(struct extent_io_tree *tree,
3185 struct address_space *mapping,
3186 get_extent_t *get_extent,
3187 struct writeback_control *wbc)
3188{
3189 int ret = 0;
3190 struct extent_page_data epd = {
3191 .bio = NULL,
3192 .tree = tree,
3193 .get_extent = get_extent,
Chris Mason771ed682008-11-06 22:02:51 -05003194 .extent_locked = 0,
Chris Masonffbd5172009-04-20 15:50:09 -04003195 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
Chris Masond1310b22008-01-24 16:13:08 -05003196 };
3197
Chris Mason4bef0842008-09-08 11:18:08 -04003198 ret = extent_write_cache_pages(tree, mapping, wbc,
Chris Masond2c3f4f2008-11-19 12:44:22 -05003199 __extent_writepage, &epd,
3200 flush_write_bio);
Chris Masonffbd5172009-04-20 15:50:09 -04003201 flush_epd_write_bio(&epd);
Chris Masond1310b22008-01-24 16:13:08 -05003202 return ret;
3203}
Chris Masond1310b22008-01-24 16:13:08 -05003204
3205int extent_readpages(struct extent_io_tree *tree,
3206 struct address_space *mapping,
3207 struct list_head *pages, unsigned nr_pages,
3208 get_extent_t get_extent)
3209{
3210 struct bio *bio = NULL;
3211 unsigned page_idx;
Chris Masonc8b97812008-10-29 14:49:59 -04003212 unsigned long bio_flags = 0;
Chris Masond1310b22008-01-24 16:13:08 -05003213
Chris Masond1310b22008-01-24 16:13:08 -05003214 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
3215 struct page *page = list_entry(pages->prev, struct page, lru);
3216
3217 prefetchw(&page->flags);
3218 list_del(&page->lru);
Nick Piggin28ecb6092010-03-17 13:31:04 +00003219 if (!add_to_page_cache_lru(page, mapping,
Itaru Kitayama43e817a2011-04-25 19:43:51 -04003220 page->index, GFP_NOFS)) {
Chris Masonf1885912008-04-09 16:28:12 -04003221 __extent_read_full_page(tree, page, get_extent,
Chris Masonc8b97812008-10-29 14:49:59 -04003222 &bio, 0, &bio_flags);
Chris Masond1310b22008-01-24 16:13:08 -05003223 }
3224 page_cache_release(page);
3225 }
Chris Masond1310b22008-01-24 16:13:08 -05003226 BUG_ON(!list_empty(pages));
3227 if (bio)
Chris Masonc8b97812008-10-29 14:49:59 -04003228 submit_one_bio(READ, bio, 0, bio_flags);
Chris Masond1310b22008-01-24 16:13:08 -05003229 return 0;
3230}
Chris Masond1310b22008-01-24 16:13:08 -05003231
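/*
 * Illustrative sketch (not part of the original file): how the helpers
 * above are typically wired into address_space_operations.  The
 * btrfs_get_extent callback is an assumption; the wrappers only recover
 * the io tree from the inode behind the mapping.
 */
static int example_writepages(struct address_space *mapping,
			      struct writeback_control *wbc)
{
	struct extent_io_tree *tree = &BTRFS_I(mapping->host)->io_tree;

	return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
}

static int example_readpages(struct file *file, struct address_space *mapping,
			     struct list_head *pages, unsigned nr_pages)
{
	struct extent_io_tree *tree = &BTRFS_I(mapping->host)->io_tree;

	return extent_readpages(tree, mapping, pages, nr_pages,
				btrfs_get_extent);
}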
3232/*
3233 * basic invalidatepage code, this waits on any locked or writeback
3234 * ranges corresponding to the page, and then deletes any extent state
3235 * records from the tree
3236 */
3237int extent_invalidatepage(struct extent_io_tree *tree,
3238 struct page *page, unsigned long offset)
3239{
Josef Bacik2ac55d42010-02-03 19:33:23 +00003240 struct extent_state *cached_state = NULL;
Chris Masond1310b22008-01-24 16:13:08 -05003241 u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
3242 u64 end = start + PAGE_CACHE_SIZE - 1;
3243 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
3244
Chris Masond3977122009-01-05 21:25:51 -05003245 start += (offset + blocksize - 1) & ~(blocksize - 1);
Chris Masond1310b22008-01-24 16:13:08 -05003246 if (start > end)
3247 return 0;
3248
Josef Bacik2ac55d42010-02-03 19:33:23 +00003249 lock_extent_bits(tree, start, end, 0, &cached_state, GFP_NOFS);
Chris Mason1edbb732009-09-02 13:24:36 -04003250 wait_on_page_writeback(page);
Chris Masond1310b22008-01-24 16:13:08 -05003251 clear_extent_bit(tree, start, end,
Josef Bacik32c00af2009-10-08 13:34:05 -04003252 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
3253 EXTENT_DO_ACCOUNTING,
Josef Bacik2ac55d42010-02-03 19:33:23 +00003254 1, 1, &cached_state, GFP_NOFS);
Chris Masond1310b22008-01-24 16:13:08 -05003255 return 0;
3256}
Chris Masond1310b22008-01-24 16:13:08 -05003257
3258/*
Chris Mason7b13b7b2008-04-18 10:29:50 -04003259 * a helper for releasepage, this tests for areas of the page that
3260 * are locked or under IO and drops the related state bits if it is safe
3261 * to drop the page.
3262 */
3263int try_release_extent_state(struct extent_map_tree *map,
3264 struct extent_io_tree *tree, struct page *page,
3265 gfp_t mask)
3266{
3267 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
3268 u64 end = start + PAGE_CACHE_SIZE - 1;
3269 int ret = 1;
3270
Chris Mason211f90e2008-07-18 11:56:15 -04003271 if (test_range_bit(tree, start, end,
Chris Mason8b62b722009-09-02 16:53:46 -04003272 EXTENT_IOBITS, 0, NULL))
Chris Mason7b13b7b2008-04-18 10:29:50 -04003273 ret = 0;
3274 else {
3275 if ((mask & GFP_NOFS) == GFP_NOFS)
3276 mask = GFP_NOFS;
Chris Mason11ef1602009-09-23 20:28:46 -04003277 /*
3278 * at this point we can safely clear everything except the
3279 * locked bit and the nodatasum bit
3280 */
Chris Masone3f24cc2011-02-14 12:52:08 -05003281 ret = clear_extent_bit(tree, start, end,
Chris Mason11ef1602009-09-23 20:28:46 -04003282 ~(EXTENT_LOCKED | EXTENT_NODATASUM),
3283 0, 0, NULL, mask);
Chris Masone3f24cc2011-02-14 12:52:08 -05003284
3285 /* if clear_extent_bit failed for enomem reasons,
3286 * we can't allow the release to continue.
3287 */
3288 if (ret < 0)
3289 ret = 0;
3290 else
3291 ret = 1;
Chris Mason7b13b7b2008-04-18 10:29:50 -04003292 }
3293 return ret;
3294}
Chris Mason7b13b7b2008-04-18 10:29:50 -04003295
3296/*
Chris Masond1310b22008-01-24 16:13:08 -05003297 * a helper for releasepage. As long as there are no locked extents
3298 * in the range corresponding to the page, both state records and extent
3299 * map records are removed
3300 */
3301int try_release_extent_mapping(struct extent_map_tree *map,
Chris Mason70dec802008-01-29 09:59:12 -05003302 struct extent_io_tree *tree, struct page *page,
3303 gfp_t mask)
Chris Masond1310b22008-01-24 16:13:08 -05003304{
3305 struct extent_map *em;
3306 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
3307 u64 end = start + PAGE_CACHE_SIZE - 1;
Chris Mason7b13b7b2008-04-18 10:29:50 -04003308
Chris Mason70dec802008-01-29 09:59:12 -05003309 if ((mask & __GFP_WAIT) &&
3310 page->mapping->host->i_size > 16 * 1024 * 1024) {
Yan39b56372008-02-15 10:40:50 -05003311 u64 len;
Chris Mason70dec802008-01-29 09:59:12 -05003312 while (start <= end) {
Yan39b56372008-02-15 10:40:50 -05003313 len = end - start + 1;
Chris Mason890871b2009-09-02 16:24:52 -04003314 write_lock(&map->lock);
Yan39b56372008-02-15 10:40:50 -05003315 em = lookup_extent_mapping(map, start, len);
Tsutomu Itoh285190d2012-02-16 16:23:58 +09003316 if (!em) {
Chris Mason890871b2009-09-02 16:24:52 -04003317 write_unlock(&map->lock);
Chris Mason70dec802008-01-29 09:59:12 -05003318 break;
3319 }
Chris Mason7f3c74f2008-07-18 12:01:11 -04003320 if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
3321 em->start != start) {
Chris Mason890871b2009-09-02 16:24:52 -04003322 write_unlock(&map->lock);
Chris Mason70dec802008-01-29 09:59:12 -05003323 free_extent_map(em);
3324 break;
3325 }
3326 if (!test_range_bit(tree, em->start,
3327 extent_map_end(em) - 1,
Chris Mason8b62b722009-09-02 16:53:46 -04003328 EXTENT_LOCKED | EXTENT_WRITEBACK,
Chris Mason9655d292009-09-02 15:22:30 -04003329 0, NULL)) {
Chris Mason70dec802008-01-29 09:59:12 -05003330 remove_extent_mapping(map, em);
3331 /* once for the rb tree */
3332 free_extent_map(em);
3333 }
3334 start = extent_map_end(em);
Chris Mason890871b2009-09-02 16:24:52 -04003335 write_unlock(&map->lock);
Chris Mason70dec802008-01-29 09:59:12 -05003336
3337 /* once for us */
Chris Masond1310b22008-01-24 16:13:08 -05003338 free_extent_map(em);
3339 }
Chris Masond1310b22008-01-24 16:13:08 -05003340 }
Chris Mason7b13b7b2008-04-18 10:29:50 -04003341 return try_release_extent_state(map, tree, page, mask);
Chris Masond1310b22008-01-24 16:13:08 -05003342}
Chris Masond1310b22008-01-24 16:13:08 -05003343
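/*
 * Illustrative sketch (not part of the original file): a ->releasepage
 * style wrapper.  The io_tree/extent_tree lookups through BTRFS_I() are
 * assumptions; the nonzero return tells the VM the page's private state
 * was dropped and the page may be freed.
 */
static int example_releasepage(struct page *page, gfp_t gfp_flags)
{
	struct inode *inode = page->mapping->host;
	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
	struct extent_map_tree *map = &BTRFS_I(inode)->extent_tree;

	return try_release_extent_mapping(map, tree, page, gfp_flags);
}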
Chris Masonec29ed52011-02-23 16:23:20 -05003344/*
3345 * helper function for fiemap, which doesn't want to see any holes.
3346 * This maps until we find something past 'last'
3347 */
3348static struct extent_map *get_extent_skip_holes(struct inode *inode,
3349 u64 offset,
3350 u64 last,
3351 get_extent_t *get_extent)
3352{
3353 u64 sectorsize = BTRFS_I(inode)->root->sectorsize;
3354 struct extent_map *em;
3355 u64 len;
3356
3357 if (offset >= last)
3358 return NULL;
3359
 3360	while (1) {
3361 len = last - offset;
3362 if (len == 0)
3363 break;
3364 len = (len + sectorsize - 1) & ~(sectorsize - 1);
3365 em = get_extent(inode, NULL, 0, offset, len, 0);
David Sterbac7040052011-04-19 18:00:01 +02003366 if (IS_ERR_OR_NULL(em))
Chris Masonec29ed52011-02-23 16:23:20 -05003367 return em;
3368
3369 /* if this isn't a hole return it */
3370 if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) &&
3371 em->block_start != EXTENT_MAP_HOLE) {
3372 return em;
3373 }
3374
3375 /* this is a hole, advance to the next extent */
3376 offset = extent_map_end(em);
3377 free_extent_map(em);
3378 if (offset >= last)
3379 break;
3380 }
3381 return NULL;
3382}
3383
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003384int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3385 __u64 start, __u64 len, get_extent_t *get_extent)
3386{
Josef Bacik975f84f2010-11-23 19:36:57 +00003387 int ret = 0;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003388 u64 off = start;
3389 u64 max = start + len;
3390 u32 flags = 0;
Josef Bacik975f84f2010-11-23 19:36:57 +00003391 u32 found_type;
3392 u64 last;
Chris Masonec29ed52011-02-23 16:23:20 -05003393 u64 last_for_get_extent = 0;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003394 u64 disko = 0;
Chris Masonec29ed52011-02-23 16:23:20 -05003395 u64 isize = i_size_read(inode);
Josef Bacik975f84f2010-11-23 19:36:57 +00003396 struct btrfs_key found_key;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003397 struct extent_map *em = NULL;
Josef Bacik2ac55d42010-02-03 19:33:23 +00003398 struct extent_state *cached_state = NULL;
Josef Bacik975f84f2010-11-23 19:36:57 +00003399 struct btrfs_path *path;
3400 struct btrfs_file_extent_item *item;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003401 int end = 0;
Chris Masonec29ed52011-02-23 16:23:20 -05003402 u64 em_start = 0;
3403 u64 em_len = 0;
3404 u64 em_end = 0;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003405 unsigned long emflags;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003406
3407 if (len == 0)
3408 return -EINVAL;
3409
Josef Bacik975f84f2010-11-23 19:36:57 +00003410 path = btrfs_alloc_path();
3411 if (!path)
3412 return -ENOMEM;
3413 path->leave_spinning = 1;
3414
Josef Bacik4d479cf2011-11-17 11:34:31 -05003415 start = ALIGN(start, BTRFS_I(inode)->root->sectorsize);
3416 len = ALIGN(len, BTRFS_I(inode)->root->sectorsize);
3417
Chris Masonec29ed52011-02-23 16:23:20 -05003418 /*
3419 * lookup the last file extent. We're not using i_size here
3420 * because there might be preallocation past i_size
3421 */
Josef Bacik975f84f2010-11-23 19:36:57 +00003422 ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root,
Li Zefan33345d012011-04-20 10:31:50 +08003423 path, btrfs_ino(inode), -1, 0);
Josef Bacik975f84f2010-11-23 19:36:57 +00003424 if (ret < 0) {
3425 btrfs_free_path(path);
3426 return ret;
3427 }
3428 WARN_ON(!ret);
3429 path->slots[0]--;
3430 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3431 struct btrfs_file_extent_item);
3432 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
3433 found_type = btrfs_key_type(&found_key);
3434
Chris Masonec29ed52011-02-23 16:23:20 -05003435 /* No extents, but there might be delalloc bits */
Li Zefan33345d012011-04-20 10:31:50 +08003436 if (found_key.objectid != btrfs_ino(inode) ||
Josef Bacik975f84f2010-11-23 19:36:57 +00003437 found_type != BTRFS_EXTENT_DATA_KEY) {
Chris Masonec29ed52011-02-23 16:23:20 -05003438 /* have to trust i_size as the end */
3439 last = (u64)-1;
3440 last_for_get_extent = isize;
3441 } else {
3442 /*
3443 * remember the start of the last extent. There are a
3444 * bunch of different factors that go into the length of the
 3445	 * extent, so it's much less complex to remember where it started
3446 */
3447 last = found_key.offset;
3448 last_for_get_extent = last + 1;
Josef Bacik975f84f2010-11-23 19:36:57 +00003449 }
Josef Bacik975f84f2010-11-23 19:36:57 +00003450 btrfs_free_path(path);
3451
Chris Masonec29ed52011-02-23 16:23:20 -05003452 /*
3453 * we might have some extents allocated but more delalloc past those
3454 * extents. so, we trust isize unless the start of the last extent is
3455 * beyond isize
3456 */
3457 if (last < isize) {
3458 last = (u64)-1;
3459 last_for_get_extent = isize;
3460 }
3461
Josef Bacik2ac55d42010-02-03 19:33:23 +00003462 lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
3463 &cached_state, GFP_NOFS);
Chris Masonec29ed52011-02-23 16:23:20 -05003464
Josef Bacik4d479cf2011-11-17 11:34:31 -05003465 em = get_extent_skip_holes(inode, start, last_for_get_extent,
Chris Masonec29ed52011-02-23 16:23:20 -05003466 get_extent);
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003467 if (!em)
3468 goto out;
3469 if (IS_ERR(em)) {
3470 ret = PTR_ERR(em);
3471 goto out;
3472 }
Josef Bacik975f84f2010-11-23 19:36:57 +00003473
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003474 while (!end) {
Chris Masonea8efc72011-03-08 11:54:40 -05003475 u64 offset_in_extent;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003476
Chris Masonea8efc72011-03-08 11:54:40 -05003477 /* break if the extent we found is outside the range */
3478 if (em->start >= max || extent_map_end(em) < off)
3479 break;
3480
3481 /*
3482 * get_extent may return an extent that starts before our
3483 * requested range. We have to make sure the ranges
3484 * we return to fiemap always move forward and don't
3485 * overlap, so adjust the offsets here
3486 */
3487 em_start = max(em->start, off);
3488
3489 /*
3490 * record the offset from the start of the extent
3491 * for adjusting the disk offset below
3492 */
3493 offset_in_extent = em_start - em->start;
Chris Masonec29ed52011-02-23 16:23:20 -05003494 em_end = extent_map_end(em);
Chris Masonea8efc72011-03-08 11:54:40 -05003495 em_len = em_end - em_start;
Chris Masonec29ed52011-02-23 16:23:20 -05003496 emflags = em->flags;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003497 disko = 0;
3498 flags = 0;
3499
Chris Masonea8efc72011-03-08 11:54:40 -05003500 /*
3501 * bump off for our next call to get_extent
3502 */
3503 off = extent_map_end(em);
3504 if (off >= max)
3505 end = 1;
3506
Heiko Carstens93dbfad2009-04-03 10:33:45 -04003507 if (em->block_start == EXTENT_MAP_LAST_BYTE) {
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003508 end = 1;
3509 flags |= FIEMAP_EXTENT_LAST;
Heiko Carstens93dbfad2009-04-03 10:33:45 -04003510 } else if (em->block_start == EXTENT_MAP_INLINE) {
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003511 flags |= (FIEMAP_EXTENT_DATA_INLINE |
3512 FIEMAP_EXTENT_NOT_ALIGNED);
Heiko Carstens93dbfad2009-04-03 10:33:45 -04003513 } else if (em->block_start == EXTENT_MAP_DELALLOC) {
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003514 flags |= (FIEMAP_EXTENT_DELALLOC |
3515 FIEMAP_EXTENT_UNKNOWN);
Heiko Carstens93dbfad2009-04-03 10:33:45 -04003516 } else {
Chris Masonea8efc72011-03-08 11:54:40 -05003517 disko = em->block_start + offset_in_extent;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003518 }
3519 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
3520 flags |= FIEMAP_EXTENT_ENCODED;
3521
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003522 free_extent_map(em);
3523 em = NULL;
Chris Masonec29ed52011-02-23 16:23:20 -05003524 if ((em_start >= last) || em_len == (u64)-1 ||
3525 (last == (u64)-1 && isize <= em_end)) {
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003526 flags |= FIEMAP_EXTENT_LAST;
3527 end = 1;
3528 }
3529
Chris Masonec29ed52011-02-23 16:23:20 -05003530 /* now scan forward to see if this is really the last extent. */
3531 em = get_extent_skip_holes(inode, off, last_for_get_extent,
3532 get_extent);
3533 if (IS_ERR(em)) {
3534 ret = PTR_ERR(em);
3535 goto out;
3536 }
3537 if (!em) {
Josef Bacik975f84f2010-11-23 19:36:57 +00003538 flags |= FIEMAP_EXTENT_LAST;
3539 end = 1;
3540 }
Chris Masonec29ed52011-02-23 16:23:20 -05003541 ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
3542 em_len, flags);
3543 if (ret)
3544 goto out_free;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003545 }
3546out_free:
3547 free_extent_map(em);
3548out:
Josef Bacik2ac55d42010-02-03 19:33:23 +00003549 unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len,
3550 &cached_state, GFP_NOFS);
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003551 return ret;
3552}
3553
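/*
 * Illustrative sketch (not part of the original file): the ->fiemap
 * inode operation is a thin wrapper around extent_fiemap().  The
 * btrfs_get_extent_fiemap callback named here is an assumption; any
 * get_extent_t that also reports delalloc ranges would work.
 */
static int example_fiemap(struct inode *inode,
			  struct fiemap_extent_info *fieinfo,
			  u64 start, u64 len)
{
	return extent_fiemap(inode, fieinfo, start, len,
			     btrfs_get_extent_fiemap);
}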
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02003554inline struct page *extent_buffer_page(struct extent_buffer *eb,
Chris Masond1310b22008-01-24 16:13:08 -05003555 unsigned long i)
3556{
3557 struct page *p;
3558 struct address_space *mapping;
3559
3560 if (i == 0)
3561 return eb->first_page;
3562 i += eb->start >> PAGE_CACHE_SHIFT;
3563 mapping = eb->first_page->mapping;
Chris Mason33958dc2008-07-30 10:29:12 -04003564 if (!mapping)
3565 return NULL;
Sven Wegener0ee0fda2008-07-30 16:54:26 -04003566
3567 /*
3568 * extent_buffer_page is only called after pinning the page
3569 * by increasing the reference count. So we know the page must
3570 * be in the radix tree.
3571 */
Sven Wegener0ee0fda2008-07-30 16:54:26 -04003572 rcu_read_lock();
Chris Masond1310b22008-01-24 16:13:08 -05003573 p = radix_tree_lookup(&mapping->page_tree, i);
Sven Wegener0ee0fda2008-07-30 16:54:26 -04003574 rcu_read_unlock();
Chris Mason2b1f55b2008-09-24 11:48:04 -04003575
Chris Masond1310b22008-01-24 16:13:08 -05003576 return p;
3577}
3578
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02003579inline unsigned long num_extent_pages(u64 start, u64 len)
Chris Masonce9adaa2008-04-09 16:28:12 -04003580{
Chris Mason6af118c2008-07-22 11:18:07 -04003581 return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
3582 (start >> PAGE_CACHE_SHIFT);
Chris Mason728131d2008-04-09 16:28:12 -04003583}
3584
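/*
 * illustrative example (not in the original file): with 4K pages, an
 * extent buffer at start 6144 with len 8192 touches
 * ((6144 + 8192 + 4095) >> 12) - (6144 >> 12) == 4 - 1 == 3 pages,
 * because the unaligned start makes the 8K buffer straddle three pages.
 */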
Chris Masond1310b22008-01-24 16:13:08 -05003585static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
3586 u64 start,
3587 unsigned long len,
3588 gfp_t mask)
3589{
3590 struct extent_buffer *eb = NULL;
Chris Mason39351272009-02-04 09:24:05 -05003591#if LEAK_DEBUG
Chris Mason2d2ae542008-03-26 16:24:23 -04003592 unsigned long flags;
Chris Mason4bef0842008-09-08 11:18:08 -04003593#endif
Chris Masond1310b22008-01-24 16:13:08 -05003594
Chris Masond1310b22008-01-24 16:13:08 -05003595 eb = kmem_cache_zalloc(extent_buffer_cache, mask);
Tsutomu Itoh91ca3382011-01-05 02:32:22 +00003596 if (eb == NULL)
3597 return NULL;
Chris Masond1310b22008-01-24 16:13:08 -05003598 eb->start = start;
3599 eb->len = len;
Chris Masonbd681512011-07-16 15:23:14 -04003600 rwlock_init(&eb->lock);
3601 atomic_set(&eb->write_locks, 0);
3602 atomic_set(&eb->read_locks, 0);
3603 atomic_set(&eb->blocking_readers, 0);
3604 atomic_set(&eb->blocking_writers, 0);
3605 atomic_set(&eb->spinning_readers, 0);
3606 atomic_set(&eb->spinning_writers, 0);
Arne Jansen5b25f702011-09-13 10:55:48 +02003607 eb->lock_nested = 0;
Chris Masonbd681512011-07-16 15:23:14 -04003608 init_waitqueue_head(&eb->write_lock_wq);
3609 init_waitqueue_head(&eb->read_lock_wq);
Chris Masonb4ce94d2009-02-04 09:25:08 -05003610
Chris Mason39351272009-02-04 09:24:05 -05003611#if LEAK_DEBUG
Chris Mason2d2ae542008-03-26 16:24:23 -04003612 spin_lock_irqsave(&leak_lock, flags);
3613 list_add(&eb->leak_list, &buffers);
3614 spin_unlock_irqrestore(&leak_lock, flags);
Chris Mason4bef0842008-09-08 11:18:08 -04003615#endif
Chris Masond1310b22008-01-24 16:13:08 -05003616 atomic_set(&eb->refs, 1);
3617
3618 return eb;
3619}
3620
3621static void __free_extent_buffer(struct extent_buffer *eb)
3622{
Chris Mason39351272009-02-04 09:24:05 -05003623#if LEAK_DEBUG
Chris Mason2d2ae542008-03-26 16:24:23 -04003624 unsigned long flags;
3625 spin_lock_irqsave(&leak_lock, flags);
3626 list_del(&eb->leak_list);
3627 spin_unlock_irqrestore(&leak_lock, flags);
Chris Mason4bef0842008-09-08 11:18:08 -04003628#endif
Chris Masond1310b22008-01-24 16:13:08 -05003629 kmem_cache_free(extent_buffer_cache, eb);
3630}
3631
Miao Xie897ca6e2010-10-26 20:57:29 -04003632/*
3633 * Helper for releasing extent buffer page.
3634 */
3635static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
3636 unsigned long start_idx)
3637{
3638 unsigned long index;
3639 struct page *page;
3640
3641 if (!eb->first_page)
3642 return;
3643
3644 index = num_extent_pages(eb->start, eb->len);
3645 if (start_idx >= index)
3646 return;
3647
3648 do {
3649 index--;
3650 page = extent_buffer_page(eb, index);
3651 if (page)
3652 page_cache_release(page);
3653 } while (index != start_idx);
3654}
3655
3656/*
3657 * Helper for releasing the extent buffer.
3658 */
3659static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
3660{
3661 btrfs_release_extent_buffer_page(eb, 0);
3662 __free_extent_buffer(eb);
3663}
3664
Chris Masond1310b22008-01-24 16:13:08 -05003665struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
3666 u64 start, unsigned long len,
David Sterbaba144192011-04-21 01:12:06 +02003667 struct page *page0)
Chris Masond1310b22008-01-24 16:13:08 -05003668{
3669 unsigned long num_pages = num_extent_pages(start, len);
3670 unsigned long i;
3671 unsigned long index = start >> PAGE_CACHE_SHIFT;
3672 struct extent_buffer *eb;
Chris Mason6af118c2008-07-22 11:18:07 -04003673 struct extent_buffer *exists = NULL;
Chris Masond1310b22008-01-24 16:13:08 -05003674 struct page *p;
3675 struct address_space *mapping = tree->mapping;
3676 int uptodate = 1;
Miao Xie19fe0a82010-10-26 20:57:29 -04003677 int ret;
Chris Masond1310b22008-01-24 16:13:08 -05003678
Miao Xie19fe0a82010-10-26 20:57:29 -04003679 rcu_read_lock();
3680 eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
3681 if (eb && atomic_inc_not_zero(&eb->refs)) {
3682 rcu_read_unlock();
Josef Bacik0f9dd462008-09-23 13:14:11 -04003683 mark_page_accessed(eb->first_page);
Chris Mason6af118c2008-07-22 11:18:07 -04003684 return eb;
3685 }
Miao Xie19fe0a82010-10-26 20:57:29 -04003686 rcu_read_unlock();
Chris Mason6af118c2008-07-22 11:18:07 -04003687
David Sterbaba144192011-04-21 01:12:06 +02003688 eb = __alloc_extent_buffer(tree, start, len, GFP_NOFS);
Peter2b114d12008-04-01 11:21:40 -04003689 if (!eb)
Chris Masond1310b22008-01-24 16:13:08 -05003690 return NULL;
3691
Chris Masond1310b22008-01-24 16:13:08 -05003692 if (page0) {
3693 eb->first_page = page0;
3694 i = 1;
3695 index++;
3696 page_cache_get(page0);
3697 mark_page_accessed(page0);
3698 set_page_extent_mapped(page0);
Chris Masond1310b22008-01-24 16:13:08 -05003699 set_page_extent_head(page0, len);
Chris Masonf1885912008-04-09 16:28:12 -04003700 uptodate = PageUptodate(page0);
Chris Masond1310b22008-01-24 16:13:08 -05003701 } else {
3702 i = 0;
3703 }
3704 for (; i < num_pages; i++, index++) {
Chris Masona6591712011-07-19 12:04:14 -04003705 p = find_or_create_page(mapping, index, GFP_NOFS);
Chris Masond1310b22008-01-24 16:13:08 -05003706 if (!p) {
3707 WARN_ON(1);
Chris Mason6af118c2008-07-22 11:18:07 -04003708 goto free_eb;
Chris Masond1310b22008-01-24 16:13:08 -05003709 }
3710 set_page_extent_mapped(p);
3711 mark_page_accessed(p);
3712 if (i == 0) {
3713 eb->first_page = p;
3714 set_page_extent_head(p, len);
3715 } else {
3716 set_page_private(p, EXTENT_PAGE_PRIVATE);
3717 }
3718 if (!PageUptodate(p))
3719 uptodate = 0;
Chris Masoneb14ab82011-02-10 12:35:00 -05003720
3721 /*
3722 * see below about how we avoid a nasty race with release page
3723 * and why we unlock later
3724 */
3725 if (i != 0)
3726 unlock_page(p);
Chris Masond1310b22008-01-24 16:13:08 -05003727 }
3728 if (uptodate)
Chris Masonb4ce94d2009-02-04 09:25:08 -05003729 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
Chris Masond1310b22008-01-24 16:13:08 -05003730
Miao Xie19fe0a82010-10-26 20:57:29 -04003731 ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
3732 if (ret)
3733 goto free_eb;
3734
Chris Mason6af118c2008-07-22 11:18:07 -04003735 spin_lock(&tree->buffer_lock);
Miao Xie19fe0a82010-10-26 20:57:29 -04003736 ret = radix_tree_insert(&tree->buffer, start >> PAGE_CACHE_SHIFT, eb);
3737 if (ret == -EEXIST) {
3738 exists = radix_tree_lookup(&tree->buffer,
3739 start >> PAGE_CACHE_SHIFT);
Chris Mason6af118c2008-07-22 11:18:07 -04003740 /* add one reference for the caller */
3741 atomic_inc(&exists->refs);
3742 spin_unlock(&tree->buffer_lock);
Miao Xie19fe0a82010-10-26 20:57:29 -04003743 radix_tree_preload_end();
Chris Mason6af118c2008-07-22 11:18:07 -04003744 goto free_eb;
3745 }
Chris Mason6af118c2008-07-22 11:18:07 -04003746 /* add one reference for the tree */
3747 atomic_inc(&eb->refs);
Yan, Zhengf044ba72010-02-04 08:46:56 +00003748 spin_unlock(&tree->buffer_lock);
Miao Xie19fe0a82010-10-26 20:57:29 -04003749 radix_tree_preload_end();
Chris Masoneb14ab82011-02-10 12:35:00 -05003750
3751 /*
3752 * there is a race where release page may have
3753 * tried to find this extent buffer in the radix
3754 * but failed. It will tell the VM it is safe to
 3755	 * reclaim the page, and it will clear the page private bit.
3756 * We must make sure to set the page private bit properly
3757 * after the extent buffer is in the radix tree so
3758 * it doesn't get lost
3759 */
3760 set_page_extent_mapped(eb->first_page);
3761 set_page_extent_head(eb->first_page, eb->len);
3762 if (!page0)
3763 unlock_page(eb->first_page);
Chris Masond1310b22008-01-24 16:13:08 -05003764 return eb;
3765
Chris Mason6af118c2008-07-22 11:18:07 -04003766free_eb:
Chris Masoneb14ab82011-02-10 12:35:00 -05003767 if (eb->first_page && !page0)
3768 unlock_page(eb->first_page);
3769
Chris Masond1310b22008-01-24 16:13:08 -05003770 if (!atomic_dec_and_test(&eb->refs))
Chris Mason6af118c2008-07-22 11:18:07 -04003771 return exists;
Miao Xie897ca6e2010-10-26 20:57:29 -04003772 btrfs_release_extent_buffer(eb);
Chris Mason6af118c2008-07-22 11:18:07 -04003773 return exists;
Chris Masond1310b22008-01-24 16:13:08 -05003774}
Chris Masond1310b22008-01-24 16:13:08 -05003775
3776struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
David Sterbaf09d1f62011-04-21 01:08:01 +02003777 u64 start, unsigned long len)
Chris Masond1310b22008-01-24 16:13:08 -05003778{
Chris Masond1310b22008-01-24 16:13:08 -05003779 struct extent_buffer *eb;
Chris Masond1310b22008-01-24 16:13:08 -05003780
Miao Xie19fe0a82010-10-26 20:57:29 -04003781 rcu_read_lock();
3782 eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
3783 if (eb && atomic_inc_not_zero(&eb->refs)) {
3784 rcu_read_unlock();
Josef Bacik0f9dd462008-09-23 13:14:11 -04003785 mark_page_accessed(eb->first_page);
Miao Xie19fe0a82010-10-26 20:57:29 -04003786 return eb;
3787 }
3788 rcu_read_unlock();
Josef Bacik0f9dd462008-09-23 13:14:11 -04003789
Miao Xie19fe0a82010-10-26 20:57:29 -04003790 return NULL;
Chris Masond1310b22008-01-24 16:13:08 -05003791}
Chris Masond1310b22008-01-24 16:13:08 -05003792
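/*
 * Illustrative sketch (not part of the original file): the usual
 * lookup-or-allocate-then-read lifecycle for an extent buffer.  The
 * get_extent callback is an assumption (btrfs metadata reads use their
 * own helper); both lookup paths return with a reference held, which
 * free_extent_buffer() drops on the error path.
 */
static struct extent_buffer *example_read_buffer(struct extent_io_tree *tree,
						 u64 start, unsigned long len)
{
	struct extent_buffer *eb;
	int ret;

	eb = find_extent_buffer(tree, start, len);
	if (!eb)
		eb = alloc_extent_buffer(tree, start, len, NULL);
	if (!eb)
		return NULL;

	/* block until every page of the buffer is read and uptodate */
	ret = read_extent_buffer_pages(tree, eb, 0, WAIT_COMPLETE,
				       btrfs_get_extent, 0);
	if (ret) {
		free_extent_buffer(eb);
		return NULL;
	}
	return eb;
}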
3793void free_extent_buffer(struct extent_buffer *eb)
3794{
Chris Masond1310b22008-01-24 16:13:08 -05003795 if (!eb)
3796 return;
3797
3798 if (!atomic_dec_and_test(&eb->refs))
3799 return;
3800
Chris Mason6af118c2008-07-22 11:18:07 -04003801 WARN_ON(1);
Chris Masond1310b22008-01-24 16:13:08 -05003802}
Chris Masond1310b22008-01-24 16:13:08 -05003803
3804int clear_extent_buffer_dirty(struct extent_io_tree *tree,
3805 struct extent_buffer *eb)
3806{
Chris Masond1310b22008-01-24 16:13:08 -05003807 unsigned long i;
3808 unsigned long num_pages;
3809 struct page *page;
3810
Chris Masond1310b22008-01-24 16:13:08 -05003811 num_pages = num_extent_pages(eb->start, eb->len);
3812
3813 for (i = 0; i < num_pages; i++) {
3814 page = extent_buffer_page(eb, i);
Chris Masonb9473432009-03-13 11:00:37 -04003815 if (!PageDirty(page))
Chris Masond2c3f4f2008-11-19 12:44:22 -05003816 continue;
3817
Chris Masona61e6f22008-07-22 11:18:08 -04003818 lock_page(page);
Chris Masoneb14ab82011-02-10 12:35:00 -05003819 WARN_ON(!PagePrivate(page));
3820
3821 set_page_extent_mapped(page);
Chris Masond1310b22008-01-24 16:13:08 -05003822 if (i == 0)
3823 set_page_extent_head(page, eb->len);
Chris Masond1310b22008-01-24 16:13:08 -05003824
Chris Masond1310b22008-01-24 16:13:08 -05003825 clear_page_dirty_for_io(page);
Sven Wegener0ee0fda2008-07-30 16:54:26 -04003826 spin_lock_irq(&page->mapping->tree_lock);
Chris Masond1310b22008-01-24 16:13:08 -05003827 if (!PageDirty(page)) {
3828 radix_tree_tag_clear(&page->mapping->page_tree,
3829 page_index(page),
3830 PAGECACHE_TAG_DIRTY);
3831 }
Sven Wegener0ee0fda2008-07-30 16:54:26 -04003832 spin_unlock_irq(&page->mapping->tree_lock);
Chris Masonbf0da8c2011-11-04 12:29:37 -04003833 ClearPageError(page);
Chris Masona61e6f22008-07-22 11:18:08 -04003834 unlock_page(page);
Chris Masond1310b22008-01-24 16:13:08 -05003835 }
3836 return 0;
3837}
Chris Masond1310b22008-01-24 16:13:08 -05003838
Chris Masond1310b22008-01-24 16:13:08 -05003839int set_extent_buffer_dirty(struct extent_io_tree *tree,
3840 struct extent_buffer *eb)
3841{
3842 unsigned long i;
3843 unsigned long num_pages;
Chris Masonb9473432009-03-13 11:00:37 -04003844 int was_dirty = 0;
Chris Masond1310b22008-01-24 16:13:08 -05003845
Chris Masonb9473432009-03-13 11:00:37 -04003846 was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
Chris Masond1310b22008-01-24 16:13:08 -05003847 num_pages = num_extent_pages(eb->start, eb->len);
Chris Masonb9473432009-03-13 11:00:37 -04003848 for (i = 0; i < num_pages; i++)
Chris Masond1310b22008-01-24 16:13:08 -05003849 __set_page_dirty_nobuffers(extent_buffer_page(eb, i));
Chris Masonb9473432009-03-13 11:00:37 -04003850 return was_dirty;
Chris Masond1310b22008-01-24 16:13:08 -05003851}
Chris Masond1310b22008-01-24 16:13:08 -05003852
Chris Mason19b6caf2011-07-25 06:50:50 -04003853static int __eb_straddles_pages(u64 start, u64 len)
3854{
3855 if (len < PAGE_CACHE_SIZE)
3856 return 1;
3857 if (start & (PAGE_CACHE_SIZE - 1))
3858 return 1;
3859 if ((start + len) & (PAGE_CACHE_SIZE - 1))
3860 return 1;
3861 return 0;
3862}
3863
3864static int eb_straddles_pages(struct extent_buffer *eb)
3865{
3866 return __eb_straddles_pages(eb->start, eb->len);
3867}
3868
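/*
 * illustrative example (not in the original file): with 4K pages, a
 * buffer at start 4096 with len 4096 covers one page exactly and does
 * not straddle (returns 0), while a 2K buffer sharing its page with
 * neighbours returns 1 and so needs the per-range EXTENT_UPTODATE
 * tracking in the io tree.
 */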
Chris Mason1259ab72008-05-12 13:39:03 -04003869int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
Josef Bacik2ac55d42010-02-03 19:33:23 +00003870 struct extent_buffer *eb,
3871 struct extent_state **cached_state)
Chris Mason1259ab72008-05-12 13:39:03 -04003872{
3873 unsigned long i;
3874 struct page *page;
3875 unsigned long num_pages;
3876
3877 num_pages = num_extent_pages(eb->start, eb->len);
Chris Masonb4ce94d2009-02-04 09:25:08 -05003878 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
Chris Mason1259ab72008-05-12 13:39:03 -04003879
Chris Mason19b6caf2011-07-25 06:50:50 -04003880 if (eb_straddles_pages(eb)) {
3881 clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3882 cached_state, GFP_NOFS);
3883 }
Chris Mason1259ab72008-05-12 13:39:03 -04003884 for (i = 0; i < num_pages; i++) {
3885 page = extent_buffer_page(eb, i);
Chris Mason33958dc2008-07-30 10:29:12 -04003886 if (page)
3887 ClearPageUptodate(page);
Chris Mason1259ab72008-05-12 13:39:03 -04003888 }
3889 return 0;
3890}
3891
Chris Masond1310b22008-01-24 16:13:08 -05003892int set_extent_buffer_uptodate(struct extent_io_tree *tree,
3893 struct extent_buffer *eb)
3894{
3895 unsigned long i;
3896 struct page *page;
3897 unsigned long num_pages;
3898
3899 num_pages = num_extent_pages(eb->start, eb->len);
3900
Chris Mason19b6caf2011-07-25 06:50:50 -04003901 if (eb_straddles_pages(eb)) {
3902 set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3903 NULL, GFP_NOFS);
3904 }
Chris Masond1310b22008-01-24 16:13:08 -05003905 for (i = 0; i < num_pages; i++) {
3906 page = extent_buffer_page(eb, i);
3907 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
3908 ((i == num_pages - 1) &&
3909 ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
3910 check_page_uptodate(tree, page);
3911 continue;
3912 }
3913 SetPageUptodate(page);
3914 }
3915 return 0;
3916}
Chris Masond1310b22008-01-24 16:13:08 -05003917
Chris Masonce9adaa2008-04-09 16:28:12 -04003918int extent_range_uptodate(struct extent_io_tree *tree,
3919 u64 start, u64 end)
3920{
3921 struct page *page;
3922 int ret;
3923 int pg_uptodate = 1;
3924 int uptodate;
3925 unsigned long index;
3926
Chris Mason19b6caf2011-07-25 06:50:50 -04003927 if (__eb_straddles_pages(start, end - start + 1)) {
3928 ret = test_range_bit(tree, start, end,
3929 EXTENT_UPTODATE, 1, NULL);
3930 if (ret)
3931 return 1;
3932 }
Chris Masond3977122009-01-05 21:25:51 -05003933 while (start <= end) {
Chris Masonce9adaa2008-04-09 16:28:12 -04003934 index = start >> PAGE_CACHE_SHIFT;
3935 page = find_get_page(tree->mapping, index);
Mitch Harder8bedd512012-01-26 15:01:11 -05003936 if (!page)
3937 return 1;
Chris Masonce9adaa2008-04-09 16:28:12 -04003938 uptodate = PageUptodate(page);
3939 page_cache_release(page);
3940 if (!uptodate) {
3941 pg_uptodate = 0;
3942 break;
3943 }
3944 start += PAGE_CACHE_SIZE;
3945 }
3946 return pg_uptodate;
3947}
3948
Chris Masond1310b22008-01-24 16:13:08 -05003949int extent_buffer_uptodate(struct extent_io_tree *tree,
Josef Bacik2ac55d42010-02-03 19:33:23 +00003950 struct extent_buffer *eb,
3951 struct extent_state *cached_state)
Chris Masond1310b22008-01-24 16:13:08 -05003952{
Chris Mason728131d2008-04-09 16:28:12 -04003953 int ret = 0;
Chris Masonce9adaa2008-04-09 16:28:12 -04003954 unsigned long num_pages;
3955 unsigned long i;
Chris Mason728131d2008-04-09 16:28:12 -04003956 struct page *page;
3957 int pg_uptodate = 1;
3958
Chris Masonb4ce94d2009-02-04 09:25:08 -05003959 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
Chris Mason42352982008-04-28 16:40:52 -04003960 return 1;
Chris Mason728131d2008-04-09 16:28:12 -04003961
Chris Mason19b6caf2011-07-25 06:50:50 -04003962 if (eb_straddles_pages(eb)) {
3963 ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
3964 EXTENT_UPTODATE, 1, cached_state);
3965 if (ret)
3966 return ret;
3967 }
Chris Mason728131d2008-04-09 16:28:12 -04003968
3969 num_pages = num_extent_pages(eb->start, eb->len);
3970 for (i = 0; i < num_pages; i++) {
3971 page = extent_buffer_page(eb, i);
3972 if (!PageUptodate(page)) {
3973 pg_uptodate = 0;
3974 break;
3975 }
3976 }
Chris Mason42352982008-04-28 16:40:52 -04003977 return pg_uptodate;
Chris Masond1310b22008-01-24 16:13:08 -05003978}
Chris Masond1310b22008-01-24 16:13:08 -05003979
3980int read_extent_buffer_pages(struct extent_io_tree *tree,
Arne Jansenbb82ab82011-06-10 14:06:53 +02003981 struct extent_buffer *eb, u64 start, int wait,
Chris Masonf1885912008-04-09 16:28:12 -04003982 get_extent_t *get_extent, int mirror_num)
Chris Masond1310b22008-01-24 16:13:08 -05003983{
3984 unsigned long i;
3985 unsigned long start_i;
3986 struct page *page;
3987 int err;
3988 int ret = 0;
Chris Masonce9adaa2008-04-09 16:28:12 -04003989 int locked_pages = 0;
3990 int all_uptodate = 1;
3991 int inc_all_pages = 0;
Chris Masond1310b22008-01-24 16:13:08 -05003992 unsigned long num_pages;
Chris Masona86c12c2008-02-07 10:50:54 -05003993 struct bio *bio = NULL;
Chris Masonc8b97812008-10-29 14:49:59 -04003994 unsigned long bio_flags = 0;
Chris Masona86c12c2008-02-07 10:50:54 -05003995
Chris Masonb4ce94d2009-02-04 09:25:08 -05003996 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
Chris Masond1310b22008-01-24 16:13:08 -05003997 return 0;
3998
Chris Mason19b6caf2011-07-25 06:50:50 -04003999 if (eb_straddles_pages(eb)) {
4000 if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
4001 EXTENT_UPTODATE, 1, NULL)) {
4002 return 0;
4003 }
Chris Masond1310b22008-01-24 16:13:08 -05004004 }
4005
4006 if (start) {
4007 WARN_ON(start < eb->start);
4008 start_i = (start >> PAGE_CACHE_SHIFT) -
4009 (eb->start >> PAGE_CACHE_SHIFT);
4010 } else {
4011 start_i = 0;
4012 }
4013
4014 num_pages = num_extent_pages(eb->start, eb->len);
4015 for (i = start_i; i < num_pages; i++) {
4016 page = extent_buffer_page(eb, i);
Arne Jansenbb82ab82011-06-10 14:06:53 +02004017 if (wait == WAIT_NONE) {
David Woodhouse2db04962008-08-07 11:19:43 -04004018 if (!trylock_page(page))
Chris Masonce9adaa2008-04-09 16:28:12 -04004019 goto unlock_exit;
Chris Masond1310b22008-01-24 16:13:08 -05004020 } else {
4021 lock_page(page);
4022 }
Chris Masonce9adaa2008-04-09 16:28:12 -04004023 locked_pages++;
Chris Masond3977122009-01-05 21:25:51 -05004024 if (!PageUptodate(page))
Chris Masonce9adaa2008-04-09 16:28:12 -04004025 all_uptodate = 0;
Chris Masonce9adaa2008-04-09 16:28:12 -04004026 }
4027 if (all_uptodate) {
4028 if (start_i == 0)
Chris Masonb4ce94d2009-02-04 09:25:08 -05004029 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
Chris Masonce9adaa2008-04-09 16:28:12 -04004030 goto unlock_exit;
4031 }
4032
4033 for (i = start_i; i < num_pages; i++) {
4034 page = extent_buffer_page(eb, i);
Chris Masoneb14ab82011-02-10 12:35:00 -05004035
4036 WARN_ON(!PagePrivate(page));
4037
4038 set_page_extent_mapped(page);
4039 if (i == 0)
4040 set_page_extent_head(page, eb->len);
4041
Chris Masonce9adaa2008-04-09 16:28:12 -04004042 if (inc_all_pages)
4043 page_cache_get(page);
4044 if (!PageUptodate(page)) {
4045 if (start_i == 0)
4046 inc_all_pages = 1;
Chris Masonf1885912008-04-09 16:28:12 -04004047 ClearPageError(page);
Chris Masona86c12c2008-02-07 10:50:54 -05004048 err = __extent_read_full_page(tree, page,
Chris Masonf1885912008-04-09 16:28:12 -04004049 get_extent, &bio,
Chris Masonc8b97812008-10-29 14:49:59 -04004050 mirror_num, &bio_flags);
Chris Masond3977122009-01-05 21:25:51 -05004051 if (err)
Chris Masond1310b22008-01-24 16:13:08 -05004052 ret = err;
Chris Masond1310b22008-01-24 16:13:08 -05004053 } else {
4054 unlock_page(page);
4055 }
4056 }
4057
Chris Masona86c12c2008-02-07 10:50:54 -05004058 if (bio)
Chris Masonc8b97812008-10-29 14:49:59 -04004059 submit_one_bio(READ, bio, mirror_num, bio_flags);
Chris Masona86c12c2008-02-07 10:50:54 -05004060
Arne Jansenbb82ab82011-06-10 14:06:53 +02004061 if (ret || wait != WAIT_COMPLETE)
Chris Masond1310b22008-01-24 16:13:08 -05004062 return ret;
Chris Masond3977122009-01-05 21:25:51 -05004063
Chris Masond1310b22008-01-24 16:13:08 -05004064 for (i = start_i; i < num_pages; i++) {
4065 page = extent_buffer_page(eb, i);
4066 wait_on_page_locked(page);
Chris Masond3977122009-01-05 21:25:51 -05004067 if (!PageUptodate(page))
Chris Masond1310b22008-01-24 16:13:08 -05004068 ret = -EIO;
Chris Masond1310b22008-01-24 16:13:08 -05004069 }
Chris Masond3977122009-01-05 21:25:51 -05004070
Chris Masond1310b22008-01-24 16:13:08 -05004071 if (!ret)
Chris Masonb4ce94d2009-02-04 09:25:08 -05004072 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
Chris Masond1310b22008-01-24 16:13:08 -05004073 return ret;
Chris Masonce9adaa2008-04-09 16:28:12 -04004074
4075unlock_exit:
4076 i = start_i;
Chris Masond3977122009-01-05 21:25:51 -05004077 while (locked_pages > 0) {
Chris Masonce9adaa2008-04-09 16:28:12 -04004078 page = extent_buffer_page(eb, i);
4079 i++;
4080 unlock_page(page);
4081 locked_pages--;
4082 }
4083 return ret;
Chris Masond1310b22008-01-24 16:13:08 -05004084}
Chris Masond1310b22008-01-24 16:13:08 -05004085
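/*
 * Illustrative sketch (not part of the original file): WAIT_NONE turns
 * the call above into readahead.  If any page is already locked, the
 * attempt is abandoned (already-locked pages are unlocked again) and
 * the IO that was started completes asynchronously.  The callback is
 * an assumption.
 */
static void example_readahead_buffer(struct extent_io_tree *tree,
				     struct extent_buffer *eb)
{
	/* fire-and-forget: don't block on page locks or IO completion */
	read_extent_buffer_pages(tree, eb, 0, WAIT_NONE,
				 btrfs_get_extent, 0);
}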
4086void read_extent_buffer(struct extent_buffer *eb, void *dstv,
4087 unsigned long start,
4088 unsigned long len)
4089{
4090 size_t cur;
4091 size_t offset;
4092 struct page *page;
4093 char *kaddr;
4094 char *dst = (char *)dstv;
4095 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4096 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
Chris Masond1310b22008-01-24 16:13:08 -05004097
4098 WARN_ON(start > eb->len);
4099 WARN_ON(start + len > eb->start + eb->len);
4100
4101 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
4102
Chris Masond3977122009-01-05 21:25:51 -05004103 while (len > 0) {
Chris Masond1310b22008-01-24 16:13:08 -05004104 page = extent_buffer_page(eb, i);
Chris Masond1310b22008-01-24 16:13:08 -05004105
4106 cur = min(len, (PAGE_CACHE_SIZE - offset));
Chris Masona6591712011-07-19 12:04:14 -04004107 kaddr = page_address(page);
Chris Masond1310b22008-01-24 16:13:08 -05004108 memcpy(dst, kaddr + offset, cur);
Chris Masond1310b22008-01-24 16:13:08 -05004109
4110 dst += cur;
4111 len -= cur;
4112 offset = 0;
4113 i++;
4114 }
4115}
Chris Masond1310b22008-01-24 16:13:08 -05004116
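/*
 * Illustrative sketch (not part of the original file): copying a small,
 * possibly page-spanning structure out of an extent buffer.  The
 * btrfs_disk_key type and the offset parameter are assumptions for
 * illustration.
 */
static void example_read_key(struct extent_buffer *eb, unsigned long offset,
			     struct btrfs_disk_key *key)
{
	/* safe even when [offset, offset + sizeof(*key)) crosses a page */
	read_extent_buffer(eb, key, offset, sizeof(*key));
}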
4117int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
Chris Masona6591712011-07-19 12:04:14 -04004118 unsigned long min_len, char **map,
Chris Masond1310b22008-01-24 16:13:08 -05004119 unsigned long *map_start,
Chris Masona6591712011-07-19 12:04:14 -04004120 unsigned long *map_len)
Chris Masond1310b22008-01-24 16:13:08 -05004121{
4122 size_t offset = start & (PAGE_CACHE_SIZE - 1);
4123 char *kaddr;
4124 struct page *p;
4125 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4126 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4127 unsigned long end_i = (start_offset + start + min_len - 1) >>
4128 PAGE_CACHE_SHIFT;
4129
4130 if (i != end_i)
4131 return -EINVAL;
4132
4133 if (i == 0) {
4134 offset = start_offset;
4135 *map_start = 0;
4136 } else {
4137 offset = 0;
4138 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
4139 }
Chris Masond3977122009-01-05 21:25:51 -05004140
Chris Masond1310b22008-01-24 16:13:08 -05004141 if (start + min_len > eb->len) {
Chris Masond3977122009-01-05 21:25:51 -05004142 printk(KERN_ERR "btrfs bad mapping eb start %llu len %lu, "
4143 "wanted %lu %lu\n", (unsigned long long)eb->start,
4144 eb->len, start, min_len);
Chris Masond1310b22008-01-24 16:13:08 -05004145 WARN_ON(1);
Josef Bacik850265332011-03-15 14:52:12 -04004146 return -EINVAL;
Chris Masond1310b22008-01-24 16:13:08 -05004147 }
4148
4149 p = extent_buffer_page(eb, i);
Chris Masona6591712011-07-19 12:04:14 -04004150 kaddr = page_address(p);
Chris Masond1310b22008-01-24 16:13:08 -05004151 *map = kaddr + offset;
4152 *map_len = PAGE_CACHE_SIZE - offset;
4153 return 0;
4154}
Chris Masond1310b22008-01-24 16:13:08 -05004155
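/*
 * Illustrative sketch (not part of the original file): the usual
 * pattern around map_private_extent_buffer().  When the requested range
 * crosses a page boundary the map fails with -EINVAL and the caller
 * falls back to a copying read; names here are assumptions.
 */
static void example_read_via_map(struct extent_buffer *eb,
				 unsigned long start,
				 void *dst, unsigned long len)
{
	char *kaddr;
	unsigned long map_start;
	unsigned long map_len;
	int err;

	err = map_private_extent_buffer(eb, start, len, &kaddr,
					&map_start, &map_len);
	if (err) {
		/* range straddles a page: fall back to the copying read */
		read_extent_buffer(eb, dst, start, len);
		return;
	}
	/* kaddr points at buffer offset map_start, not at start itself */
	memcpy(dst, kaddr + (start - map_start), len);
}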
int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
			 unsigned long start,
			 unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *ptr = (char *)ptrv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
	int ret = 0;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);

		cur = min(len, (PAGE_CACHE_SIZE - offset));

		kaddr = page_address(page);
		ret = memcmp(ptr, kaddr + offset, cur);
		if (ret)
			break;

		ptr += cur;
		len -= cur;
		offset = 0;
		i++;
	}
	return ret;
}

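/*
 * write_extent_buffer - copy @len bytes from @srcv into @eb at offset
 * @start.  The backing pages must already be uptodate (hence the
 * WARN_ON below), since only part of each page may be rewritten.
 */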
void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
			 unsigned long start, unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *src = (char *)srcv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, PAGE_CACHE_SIZE - offset);
		kaddr = page_address(page);
		memcpy(kaddr + offset, src, cur);

		src += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}

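/*
 * memset_extent_buffer - fill @len bytes of @eb, starting at @start,
 * with the byte @c.  The page walk mirrors write_extent_buffer().
 */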
void memset_extent_buffer(struct extent_buffer *eb, char c,
			  unsigned long start, unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, PAGE_CACHE_SIZE - offset);
		kaddr = page_address(page);
		memset(kaddr + offset, c, cur);

		len -= cur;
		offset = 0;
		i++;
	}
}

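/*
 * copy_extent_buffer - copy @len bytes from @src at @src_offset into
 * @dst at @dst_offset.  Implemented as read_extent_buffer() calls
 * against each mapped destination page, so the two buffers may have
 * different page alignment.
 */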
void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
			unsigned long dst_offset, unsigned long src_offset,
			unsigned long len)
{
	u64 dst_len = dst->len;
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;

	WARN_ON(src->len != dst_len);

	offset = (start_offset + dst_offset) &
		 ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(dst, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));

		kaddr = page_address(page);
		read_extent_buffer(src, kaddr + offset, src_offset, cur);

		src_offset += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}

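/*
 * move_pages - memmove() between two (possibly identical) pages.
 * Within a single page this is a plain memmove(); between distinct
 * pages it copies high-to-low, matching the back-to-front order in
 * which memmove_extent_buffer() walks overlapping ranges.
 */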
static void move_pages(struct page *dst_page, struct page *src_page,
		       unsigned long dst_off, unsigned long src_off,
		       unsigned long len)
{
	char *dst_kaddr = page_address(dst_page);
	if (dst_page == src_page) {
		memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
	} else {
		char *src_kaddr = page_address(src_page);
		char *p = dst_kaddr + dst_off + len;
		char *s = src_kaddr + src_off + len;

		while (len--)
			*--p = *--s;
	}
}

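/*
 * areas_overlap - true when the ranges [src, src + len) and
 * [dst, dst + len) intersect.
 */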
static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
{
	unsigned long distance = (src > dst) ? src - dst : dst - src;
	return distance < len;
}

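/*
 * copy_pages - memcpy() between two page ranges.  When source and
 * destination are the same page the ranges must not overlap; the
 * BUG_ON enforces that, since memcpy() with overlap is undefined.
 */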
static void copy_pages(struct page *dst_page, struct page *src_page,
		       unsigned long dst_off, unsigned long src_off,
		       unsigned long len)
{
	char *dst_kaddr = page_address(dst_page);
	char *src_kaddr;

	if (dst_page != src_page) {
		src_kaddr = page_address(src_page);
	} else {
		src_kaddr = dst_kaddr;
		BUG_ON(areas_overlap(src_off, dst_off, len));
	}

	memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
}

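/*
 * memcpy_extent_buffer - copy @len bytes inside @dst from @src_offset
 * to @dst_offset.  The ranges must not overlap; each iteration is
 * clamped so that neither the source nor the destination chunk
 * crosses a page boundary.
 */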
void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			  unsigned long src_offset, unsigned long len)
{
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long dst_i;
	unsigned long src_i;

	if (src_offset + len > dst->len) {
		printk(KERN_ERR "btrfs memcpy bogus src_offset %lu move "
		       "len %lu dst len %lu\n", src_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset + len > dst->len) {
		printk(KERN_ERR "btrfs memcpy bogus dst_offset %lu move "
		       "len %lu dst len %lu\n", dst_offset, len, dst->len);
		BUG_ON(1);
	}

	while (len > 0) {
		dst_off_in_page = (start_offset + dst_offset) &
				  ((unsigned long)PAGE_CACHE_SIZE - 1);
		src_off_in_page = (start_offset + src_offset) &
				  ((unsigned long)PAGE_CACHE_SIZE - 1);

		dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
		src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;

		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
					       src_off_in_page));
		cur = min_t(unsigned long, cur,
			    (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));

		copy_pages(extent_buffer_page(dst, dst_i),
			   extent_buffer_page(dst, src_i),
			   dst_off_in_page, src_off_in_page, cur);

		src_offset += cur;
		dst_offset += cur;
		len -= cur;
	}
}

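/*
 * memmove_extent_buffer - overlap-safe counterpart of
 * memcpy_extent_buffer().  Non-overlapping ranges are delegated to
 * memcpy_extent_buffer(); overlapping ones are copied back-to-front,
 * page chunk by page chunk, so bytes are never clobbered before they
 * are read.
 */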
void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len)
{
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	unsigned long dst_end = dst_offset + len - 1;
	unsigned long src_end = src_offset + len - 1;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long dst_i;
	unsigned long src_i;

	if (src_offset + len > dst->len) {
		printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
		       "len %lu dst len %lu\n", src_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset + len > dst->len) {
		printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
		       "len %lu dst len %lu\n", dst_offset, len, dst->len);
		BUG_ON(1);
	}
	if (!areas_overlap(src_offset, dst_offset, len)) {
		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
		return;
	}
	while (len > 0) {
		dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
		src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;

		dst_off_in_page = (start_offset + dst_end) &
				  ((unsigned long)PAGE_CACHE_SIZE - 1);
		src_off_in_page = (start_offset + src_end) &
				  ((unsigned long)PAGE_CACHE_SIZE - 1);

		cur = min_t(unsigned long, len, src_off_in_page + 1);
		cur = min(cur, dst_off_in_page + 1);
		move_pages(extent_buffer_page(dst, dst_i),
			   extent_buffer_page(dst, src_i),
			   dst_off_in_page - cur + 1,
			   src_off_in_page - cur + 1, cur);

		dst_end -= cur;
		src_end -= cur;
		len -= cur;
	}
}

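/*
 * btrfs_release_extent_buffer_rcu - RCU callback that frees an extent
 * buffer after a grace period, once lockless radix tree lookups can
 * no longer be holding a stale pointer to it.
 */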
static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
{
	struct extent_buffer *eb =
			container_of(head, struct extent_buffer, rcu_head);

	btrfs_release_extent_buffer(eb);
}

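/*
 * try_release_extent_buffer - drop the extent buffer backed by @page
 * if nobody else is using it.  Returns 1 if the buffer was removed
 * from the radix tree (and queued for RCU freeing), 0 if it is still
 * dirty or referenced.
 */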
int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
{
	u64 start = page_offset(page);
	struct extent_buffer *eb;
	int ret = 1;

	spin_lock(&tree->buffer_lock);
	eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
	if (!eb) {
		spin_unlock(&tree->buffer_lock);
		return ret;
	}

	if (test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
		ret = 0;
		goto out;
	}

	/*
	 * Drop @eb->refs from 1 to 0 so the buffer can be released.
	 * If the count is anything other than 1, someone else still
	 * holds a reference and we must back off.
	 */
	if (atomic_cmpxchg(&eb->refs, 1, 0) != 1) {
		ret = 0;
		goto out;
	}

	radix_tree_delete(&tree->buffer, start >> PAGE_CACHE_SHIFT);
out:
	spin_unlock(&tree->buffer_lock);

	/* at this point we can safely release the extent buffer */
	if (atomic_read(&eb->refs) == 0)
		call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
	return ret;
}