/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * alloc.c
 *
 * Extent allocs and frees
 *
 * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/swap.h>

#define MLOG_MASK_PREFIX ML_DISK_ALLOC
#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "aops.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "inode.h"
#include "journal.h"
#include "localalloc.h"
#include "suballoc.h"
#include "sysfile.h"
#include "file.h"
#include "super.h"
#include "uptodate.h"

#include "buffer_head_io.h"

/*
 * ocfs2_extent_tree and ocfs2_extent_tree_operations are used to abstract
 * the b-tree operations in ocfs2. The b-tree operations are no longer
 * limited to ocfs2_dinode: any structure which needs to allocate clusters
 * for its data can store them in a b-tree, provided it implements its own
 * ocfs2_extent_tree and operations.
 *
 * ocfs2_extent_tree holds information about the root of the b-tree. It must
 * have a root ocfs2_extent_list and a root_bh so that they can be used by
 * the b-tree functions.
 * ocfs2_extent_tree_operations abstracts the normal operations performed on
 * the root of an extent b-tree.
 */
struct ocfs2_extent_tree;

struct ocfs2_extent_tree_operations {
	void (*eo_set_last_eb_blk)(struct ocfs2_extent_tree *et,
				   u64 blkno);
	u64 (*eo_get_last_eb_blk)(struct ocfs2_extent_tree *et);
	void (*eo_update_clusters)(struct inode *inode,
				   struct ocfs2_extent_tree *et,
				   u32 new_clusters);
	int (*eo_sanity_check)(struct inode *inode, struct ocfs2_extent_tree *et);
};

struct ocfs2_extent_tree {
	enum ocfs2_extent_tree_type		et_type;
	struct ocfs2_extent_tree_operations	*et_ops;
	struct buffer_head			*et_root_bh;
	struct ocfs2_extent_list		*et_root_el;
	void					*et_private;
	unsigned int				et_max_leaf_clusters;
};
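
/*
 * Illustrative sketch (not compiled): callers in this file wrap an
 * on-disk root in a temporary ocfs2_extent_tree and then go through the
 * eo_* operations via the ocfs2_et_*() wrappers below, roughly:
 *
 *	struct ocfs2_extent_tree et;
 *
 *	ocfs2_get_extent_tree(&et, inode, root_bh, OCFS2_DINODE_EXTENT, NULL);
 *	...
 *	ocfs2_et_set_last_eb_blk(&et, new_last_eb_blk);
 *	...
 *	ocfs2_put_extent_tree(&et);
 *
 * The get/put pair only manages the extra reference taken on root_bh;
 * no memory is allocated for the ocfs2_extent_tree itself.
 */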

static void ocfs2_dinode_set_last_eb_blk(struct ocfs2_extent_tree *et,
					 u64 blkno)
{
	struct ocfs2_dinode *di =
		(struct ocfs2_dinode *)et->et_root_bh->b_data;

	BUG_ON(et->et_type != OCFS2_DINODE_EXTENT);
	di->i_last_eb_blk = cpu_to_le64(blkno);
}

static u64 ocfs2_dinode_get_last_eb_blk(struct ocfs2_extent_tree *et)
{
	struct ocfs2_dinode *di =
		(struct ocfs2_dinode *)et->et_root_bh->b_data;

	BUG_ON(et->et_type != OCFS2_DINODE_EXTENT);
	return le64_to_cpu(di->i_last_eb_blk);
}

static void ocfs2_dinode_update_clusters(struct inode *inode,
					 struct ocfs2_extent_tree *et,
					 u32 clusters)
{
	struct ocfs2_dinode *di =
		(struct ocfs2_dinode *)et->et_root_bh->b_data;

	le32_add_cpu(&di->i_clusters, clusters);
	spin_lock(&OCFS2_I(inode)->ip_lock);
	OCFS2_I(inode)->ip_clusters = le32_to_cpu(di->i_clusters);
	spin_unlock(&OCFS2_I(inode)->ip_lock);
}

static int ocfs2_dinode_sanity_check(struct inode *inode,
				     struct ocfs2_extent_tree *et)
{
	int ret = 0;
	struct ocfs2_dinode *di;

	BUG_ON(et->et_type != OCFS2_DINODE_EXTENT);

	di = (struct ocfs2_dinode *)et->et_root_bh->b_data;
	if (!OCFS2_IS_VALID_DINODE(di)) {
		ret = -EIO;
		ocfs2_error(inode->i_sb,
			    "Inode %llu has invalid path root",
			    (unsigned long long)OCFS2_I(inode)->ip_blkno);
	}

	return ret;
}

static struct ocfs2_extent_tree_operations ocfs2_dinode_et_ops = {
	.eo_set_last_eb_blk	= ocfs2_dinode_set_last_eb_blk,
	.eo_get_last_eb_blk	= ocfs2_dinode_get_last_eb_blk,
	.eo_update_clusters	= ocfs2_dinode_update_clusters,
	.eo_sanity_check	= ocfs2_dinode_sanity_check,
};

static void ocfs2_xattr_value_set_last_eb_blk(struct ocfs2_extent_tree *et,
					      u64 blkno)
{
	struct ocfs2_xattr_value_root *xv =
		(struct ocfs2_xattr_value_root *)et->et_private;

	xv->xr_last_eb_blk = cpu_to_le64(blkno);
}

static u64 ocfs2_xattr_value_get_last_eb_blk(struct ocfs2_extent_tree *et)
{
	struct ocfs2_xattr_value_root *xv =
		(struct ocfs2_xattr_value_root *)et->et_private;

	return le64_to_cpu(xv->xr_last_eb_blk);
}

static void ocfs2_xattr_value_update_clusters(struct inode *inode,
					      struct ocfs2_extent_tree *et,
					      u32 clusters)
{
	struct ocfs2_xattr_value_root *xv =
		(struct ocfs2_xattr_value_root *)et->et_private;

	le32_add_cpu(&xv->xr_clusters, clusters);
}

static int ocfs2_xattr_value_sanity_check(struct inode *inode,
					  struct ocfs2_extent_tree *et)
{
	return 0;
}

static struct ocfs2_extent_tree_operations ocfs2_xattr_et_ops = {
	.eo_set_last_eb_blk	= ocfs2_xattr_value_set_last_eb_blk,
	.eo_get_last_eb_blk	= ocfs2_xattr_value_get_last_eb_blk,
	.eo_update_clusters	= ocfs2_xattr_value_update_clusters,
	.eo_sanity_check	= ocfs2_xattr_value_sanity_check,
};

static void ocfs2_xattr_tree_set_last_eb_blk(struct ocfs2_extent_tree *et,
					     u64 blkno)
{
	struct ocfs2_xattr_block *xb =
		(struct ocfs2_xattr_block *)et->et_root_bh->b_data;
	struct ocfs2_xattr_tree_root *xt = &xb->xb_attrs.xb_root;

	xt->xt_last_eb_blk = cpu_to_le64(blkno);
}

static u64 ocfs2_xattr_tree_get_last_eb_blk(struct ocfs2_extent_tree *et)
{
	struct ocfs2_xattr_block *xb =
		(struct ocfs2_xattr_block *)et->et_root_bh->b_data;
	struct ocfs2_xattr_tree_root *xt = &xb->xb_attrs.xb_root;

	return le64_to_cpu(xt->xt_last_eb_blk);
}

static void ocfs2_xattr_tree_update_clusters(struct inode *inode,
					     struct ocfs2_extent_tree *et,
					     u32 clusters)
{
	struct ocfs2_xattr_block *xb =
		(struct ocfs2_xattr_block *)et->et_root_bh->b_data;

	le32_add_cpu(&xb->xb_attrs.xb_root.xt_clusters, clusters);
}

static int ocfs2_xattr_tree_sanity_check(struct inode *inode,
					 struct ocfs2_extent_tree *et)
{
	return 0;
}

static struct ocfs2_extent_tree_operations ocfs2_xattr_tree_et_ops = {
	.eo_set_last_eb_blk	= ocfs2_xattr_tree_set_last_eb_blk,
	.eo_get_last_eb_blk	= ocfs2_xattr_tree_get_last_eb_blk,
	.eo_update_clusters	= ocfs2_xattr_tree_update_clusters,
	.eo_sanity_check	= ocfs2_xattr_tree_sanity_check,
};

static void ocfs2_get_extent_tree(struct ocfs2_extent_tree *et,
				  struct inode *inode,
				  struct buffer_head *bh,
				  enum ocfs2_extent_tree_type et_type,
				  void *private)
{
	et->et_type = et_type;
	get_bh(bh);
	et->et_root_bh = bh;
	et->et_private = private;
	et->et_max_leaf_clusters = 0;

	if (et_type == OCFS2_DINODE_EXTENT) {
		et->et_root_el =
			&((struct ocfs2_dinode *)bh->b_data)->id2.i_list;
		et->et_ops = &ocfs2_dinode_et_ops;
	} else if (et_type == OCFS2_XATTR_VALUE_EXTENT) {
		struct ocfs2_xattr_value_root *xv =
			(struct ocfs2_xattr_value_root *)private;
		et->et_root_el = &xv->xr_list;
		et->et_ops = &ocfs2_xattr_et_ops;
	} else if (et_type == OCFS2_XATTR_TREE_EXTENT) {
		struct ocfs2_xattr_block *xb =
			(struct ocfs2_xattr_block *)bh->b_data;
		et->et_root_el = &xb->xb_attrs.xb_root.xt_list;
		et->et_ops = &ocfs2_xattr_tree_et_ops;
		et->et_max_leaf_clusters = ocfs2_clusters_for_bytes(inode->i_sb,
						OCFS2_MAX_XATTR_TREE_LEAF_SIZE);
	}
}

static void ocfs2_put_extent_tree(struct ocfs2_extent_tree *et)
{
	brelse(et->et_root_bh);
}

static inline void ocfs2_et_set_last_eb_blk(struct ocfs2_extent_tree *et,
					    u64 new_last_eb_blk)
{
	et->et_ops->eo_set_last_eb_blk(et, new_last_eb_blk);
}

static inline u64 ocfs2_et_get_last_eb_blk(struct ocfs2_extent_tree *et)
{
	return et->et_ops->eo_get_last_eb_blk(et);
}

static inline void ocfs2_et_update_clusters(struct inode *inode,
					    struct ocfs2_extent_tree *et,
					    u32 clusters)
{
	et->et_ops->eo_update_clusters(inode, et, clusters);
}

static inline int ocfs2_et_sanity_check(struct inode *inode,
					struct ocfs2_extent_tree *et)
{
	return et->et_ops->eo_sanity_check(inode, et);
}

static void ocfs2_free_truncate_context(struct ocfs2_truncate_context *tc);
static int ocfs2_cache_extent_block_free(struct ocfs2_cached_dealloc_ctxt *ctxt,
					 struct ocfs2_extent_block *eb);

/*
 * Structures which describe a path through a btree, and functions to
 * manipulate them.
 *
 * The idea here is to be as generic as possible with the tree
 * manipulation code.
 */
struct ocfs2_path_item {
	struct buffer_head		*bh;
	struct ocfs2_extent_list	*el;
};

#define OCFS2_MAX_PATH_DEPTH	5

struct ocfs2_path {
	int			p_tree_depth;
	struct ocfs2_path_item	p_node[OCFS2_MAX_PATH_DEPTH];
};

#define path_root_bh(_path) ((_path)->p_node[0].bh)
#define path_root_el(_path) ((_path)->p_node[0].el)
#define path_leaf_bh(_path) ((_path)->p_node[(_path)->p_tree_depth].bh)
#define path_leaf_el(_path) ((_path)->p_node[(_path)->p_tree_depth].el)
#define path_num_items(_path) ((_path)->p_tree_depth + 1)
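
/*
 * For illustration: with p_tree_depth == 2, a path holds three items --
 * p_node[0] is the root (so path_root_el() is the root extent list),
 * p_node[1] is an interior extent block, and p_node[2] is the leaf, so
 * path_leaf_el() == path->p_node[2].el and path_num_items() == 3.
 */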

/*
 * Reset the actual path elements so that we can re-use the structure
 * to build another path. Generally, this involves freeing the buffer
 * heads.
 */
static void ocfs2_reinit_path(struct ocfs2_path *path, int keep_root)
{
	int i, start = 0, depth = 0;
	struct ocfs2_path_item *node;

	if (keep_root)
		start = 1;

	for(i = start; i < path_num_items(path); i++) {
		node = &path->p_node[i];

		brelse(node->bh);
		node->bh = NULL;
		node->el = NULL;
	}

	/*
	 * Tree depth may change during truncate, or insert. If we're
	 * keeping the root extent list, then make sure that our path
	 * structure reflects the proper depth.
	 */
	if (keep_root)
		depth = le16_to_cpu(path_root_el(path)->l_tree_depth);

	path->p_tree_depth = depth;
}

static void ocfs2_free_path(struct ocfs2_path *path)
{
	if (path) {
		ocfs2_reinit_path(path, 0);
		kfree(path);
	}
}

/*
 * Copy all the elements of src into dest. After this call, src could be
 * freed without affecting dest.
 *
 * Both paths should have the same root. Any non-root elements of dest
 * will be freed.
 */
static void ocfs2_cp_path(struct ocfs2_path *dest, struct ocfs2_path *src)
{
	int i;

	BUG_ON(path_root_bh(dest) != path_root_bh(src));
	BUG_ON(path_root_el(dest) != path_root_el(src));

	ocfs2_reinit_path(dest, 1);

	for(i = 1; i < OCFS2_MAX_PATH_DEPTH; i++) {
		dest->p_node[i].bh = src->p_node[i].bh;
		dest->p_node[i].el = src->p_node[i].el;

		if (dest->p_node[i].bh)
			get_bh(dest->p_node[i].bh);
	}
}

/*
 * Make the *dest path the same as src and re-initialize src path to
 * have a root only.
 */
static void ocfs2_mv_path(struct ocfs2_path *dest, struct ocfs2_path *src)
{
	int i;

	BUG_ON(path_root_bh(dest) != path_root_bh(src));

	for(i = 1; i < OCFS2_MAX_PATH_DEPTH; i++) {
		brelse(dest->p_node[i].bh);

		dest->p_node[i].bh = src->p_node[i].bh;
		dest->p_node[i].el = src->p_node[i].el;

		src->p_node[i].bh = NULL;
		src->p_node[i].el = NULL;
	}
}

/*
 * Insert an extent block at given index.
 *
 * This will not take an additional reference on eb_bh.
 */
static inline void ocfs2_path_insert_eb(struct ocfs2_path *path, int index,
					struct buffer_head *eb_bh)
{
	struct ocfs2_extent_block *eb = (struct ocfs2_extent_block *)eb_bh->b_data;

	/*
	 * Right now, no root bh is an extent block, so this helps
	 * catch code errors with dinode trees. The assertion can be
	 * safely removed if we ever need to insert extent block
	 * structures at the root.
	 */
	BUG_ON(index == 0);

	path->p_node[index].bh = eb_bh;
	path->p_node[index].el = &eb->h_list;
}

static struct ocfs2_path *ocfs2_new_path(struct buffer_head *root_bh,
					 struct ocfs2_extent_list *root_el)
{
	struct ocfs2_path *path;

	BUG_ON(le16_to_cpu(root_el->l_tree_depth) >= OCFS2_MAX_PATH_DEPTH);

	path = kzalloc(sizeof(*path), GFP_NOFS);
	if (path) {
		path->p_tree_depth = le16_to_cpu(root_el->l_tree_depth);
		get_bh(root_bh);
		path_root_bh(path) = root_bh;
		path_root_el(path) = root_el;
	}

	return path;
}
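
/*
 * Typical lifetime of a path, roughly as used throughout this file
 * (illustrative sketch only):
 *
 *	path = ocfs2_new_path(et->et_root_bh, et->et_root_el);
 *	if (!path)
 *		return -ENOMEM;
 *	ret = ocfs2_find_path(inode, path, cpos);
 *	...
 *	el = path_leaf_el(path);
 *	...
 *	ocfs2_free_path(path);
 *
 * ocfs2_new_path() takes its own reference on root_bh, and
 * ocfs2_free_path() drops every buffer_head reference held in the path.
 */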

/*
 * Convenience function to journal all components in a path.
 */
static int ocfs2_journal_access_path(struct inode *inode, handle_t *handle,
				     struct ocfs2_path *path)
{
	int i, ret = 0;

	if (!path)
		goto out;

	for(i = 0; i < path_num_items(path); i++) {
		ret = ocfs2_journal_access(handle, inode, path->p_node[i].bh,
					   OCFS2_JOURNAL_ACCESS_WRITE);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
	}

out:
	return ret;
}

/*
 * Return the index of the extent record which contains cluster #v_cluster.
 * -1 is returned if it was not found.
 *
 * Should work fine on interior and exterior nodes.
 */
int ocfs2_search_extent_list(struct ocfs2_extent_list *el, u32 v_cluster)
{
	int ret = -1;
	int i;
	struct ocfs2_extent_rec *rec;
	u32 rec_end, rec_start, clusters;

	for(i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) {
		rec = &el->l_recs[i];

		rec_start = le32_to_cpu(rec->e_cpos);
		clusters = ocfs2_rec_clusters(el, rec);

		rec_end = rec_start + clusters;

		if (v_cluster >= rec_start && v_cluster < rec_end) {
			ret = i;
			break;
		}
	}

	return ret;
}
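
/*
 * Worked example (illustrative): if l_recs[0] covers cpos 0 for 4 clusters
 * and l_recs[1] covers cpos 4 for 8 clusters, then a lookup of
 * v_cluster == 5 falls in the half-open range [4, 12) and returns index 1,
 * while v_cluster == 12 lies past the last record and returns -1.
 */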

enum ocfs2_contig_type {
	CONTIG_NONE = 0,
	CONTIG_LEFT,
	CONTIG_RIGHT,
	CONTIG_LEFTRIGHT,
};


/*
 * NOTE: ocfs2_block_extent_contig(), ocfs2_extents_adjacent() and
 * ocfs2_extent_contig only work properly against leaf nodes!
 */
static int ocfs2_block_extent_contig(struct super_block *sb,
				     struct ocfs2_extent_rec *ext,
				     u64 blkno)
{
	u64 blk_end = le64_to_cpu(ext->e_blkno);

	blk_end += ocfs2_clusters_to_blocks(sb,
					    le16_to_cpu(ext->e_leaf_clusters));

	return blkno == blk_end;
}

static int ocfs2_extents_adjacent(struct ocfs2_extent_rec *left,
				  struct ocfs2_extent_rec *right)
{
	u32 left_range;

	left_range = le32_to_cpu(left->e_cpos) +
		le16_to_cpu(left->e_leaf_clusters);

	return (left_range == le32_to_cpu(right->e_cpos));
}

static enum ocfs2_contig_type
	ocfs2_extent_contig(struct inode *inode,
			    struct ocfs2_extent_rec *ext,
			    struct ocfs2_extent_rec *insert_rec)
{
	u64 blkno = le64_to_cpu(insert_rec->e_blkno);

	/*
	 * Refuse to coalesce extent records with different flag
	 * fields - we don't want to mix unwritten extents with user
	 * data.
	 */
	if (ext->e_flags != insert_rec->e_flags)
		return CONTIG_NONE;

	if (ocfs2_extents_adjacent(ext, insert_rec) &&
	    ocfs2_block_extent_contig(inode->i_sb, ext, blkno))
		return CONTIG_RIGHT;

	blkno = le64_to_cpu(ext->e_blkno);
	if (ocfs2_extents_adjacent(insert_rec, ext) &&
	    ocfs2_block_extent_contig(inode->i_sb, insert_rec, blkno))
		return CONTIG_LEFT;

	return CONTIG_NONE;
}
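
/*
 * For illustration: an existing record covering cpos 10 for 4 clusters is
 * CONTIG_RIGHT with an insert record starting at cpos 14 whose e_blkno
 * immediately follows the existing record's last block, and CONTIG_LEFT
 * with one that ends exactly at cpos 10 / the existing e_blkno. Both the
 * logical (cpos) and physical (blkno) ranges must line up, and the flags
 * must match, before two records are considered contiguous.
 */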

/*
 * NOTE: We can have pretty much any combination of contiguousness and
 * appending.
 *
 * The usefulness of APPEND_TAIL is more in that it lets us know that
 * we'll have to update the path to that leaf.
 */
enum ocfs2_append_type {
	APPEND_NONE = 0,
	APPEND_TAIL,
};

enum ocfs2_split_type {
	SPLIT_NONE = 0,
	SPLIT_LEFT,
	SPLIT_RIGHT,
};

struct ocfs2_insert_type {
	enum ocfs2_split_type	ins_split;
	enum ocfs2_append_type	ins_appending;
	enum ocfs2_contig_type	ins_contig;
	int			ins_contig_index;
	int			ins_tree_depth;
};

struct ocfs2_merge_ctxt {
	enum ocfs2_contig_type	c_contig_type;
	int			c_has_empty_extent;
	int			c_split_covers_rec;
};

/*
 * How many free extents have we got before we need more meta data?
 */
int ocfs2_num_free_extents(struct ocfs2_super *osb,
			   struct inode *inode,
			   struct buffer_head *root_bh,
			   enum ocfs2_extent_tree_type type,
			   void *private)
{
	int retval;
	struct ocfs2_extent_list *el = NULL;
	struct ocfs2_extent_block *eb;
	struct buffer_head *eb_bh = NULL;
	u64 last_eb_blk = 0;

	mlog_entry_void();

	if (type == OCFS2_DINODE_EXTENT) {
		struct ocfs2_dinode *fe =
			(struct ocfs2_dinode *)root_bh->b_data;
		if (!OCFS2_IS_VALID_DINODE(fe)) {
			OCFS2_RO_ON_INVALID_DINODE(inode->i_sb, fe);
			retval = -EIO;
			goto bail;
		}

		if (fe->i_last_eb_blk)
			last_eb_blk = le64_to_cpu(fe->i_last_eb_blk);
		el = &fe->id2.i_list;
	} else if (type == OCFS2_XATTR_VALUE_EXTENT) {
		struct ocfs2_xattr_value_root *xv =
			(struct ocfs2_xattr_value_root *)private;

		last_eb_blk = le64_to_cpu(xv->xr_last_eb_blk);
		el = &xv->xr_list;
	} else if (type == OCFS2_XATTR_TREE_EXTENT) {
		struct ocfs2_xattr_block *xb =
			(struct ocfs2_xattr_block *)root_bh->b_data;

		last_eb_blk = le64_to_cpu(xb->xb_attrs.xb_root.xt_last_eb_blk);
		el = &xb->xb_attrs.xb_root.xt_list;
	}

	if (last_eb_blk) {
		retval = ocfs2_read_block(osb, last_eb_blk,
					  &eb_bh, OCFS2_BH_CACHED, inode);
		if (retval < 0) {
			mlog_errno(retval);
			goto bail;
		}
		eb = (struct ocfs2_extent_block *) eb_bh->b_data;
		el = &eb->h_list;
	}

	BUG_ON(el->l_tree_depth != 0);

	retval = le16_to_cpu(el->l_count) - le16_to_cpu(el->l_next_free_rec);
bail:
	if (eb_bh)
		brelse(eb_bh);

	mlog_exit(retval);
	return retval;
}
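
/*
 * In other words (illustrative): the number of free extents is simply
 * l_count - l_next_free_rec of the rightmost leaf -- how many more
 * records can be added to that leaf before more metadata (another
 * extent block) has to be allocated.
 */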

/* expects array to already be allocated
 *
 * sets h_signature, h_blkno, h_suballoc_bit, h_suballoc_slot, and
 * l_count for you
 */
static int ocfs2_create_new_meta_bhs(struct ocfs2_super *osb,
				     handle_t *handle,
				     struct inode *inode,
				     int wanted,
				     struct ocfs2_alloc_context *meta_ac,
				     struct buffer_head *bhs[])
{
	int count, status, i;
	u16 suballoc_bit_start;
	u32 num_got;
	u64 first_blkno;
	struct ocfs2_extent_block *eb;

	mlog_entry_void();

	count = 0;
	while (count < wanted) {
		status = ocfs2_claim_metadata(osb,
					      handle,
					      meta_ac,
					      wanted - count,
					      &suballoc_bit_start,
					      &num_got,
					      &first_blkno);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}

		for(i = count; i < (num_got + count); i++) {
			bhs[i] = sb_getblk(osb->sb, first_blkno);
			if (bhs[i] == NULL) {
				status = -EIO;
				mlog_errno(status);
				goto bail;
			}
			ocfs2_set_new_buffer_uptodate(inode, bhs[i]);

			status = ocfs2_journal_access(handle, inode, bhs[i],
						      OCFS2_JOURNAL_ACCESS_CREATE);
			if (status < 0) {
				mlog_errno(status);
				goto bail;
			}

			memset(bhs[i]->b_data, 0, osb->sb->s_blocksize);
			eb = (struct ocfs2_extent_block *) bhs[i]->b_data;
			/* Ok, setup the minimal stuff here. */
			strcpy(eb->h_signature, OCFS2_EXTENT_BLOCK_SIGNATURE);
			eb->h_blkno = cpu_to_le64(first_blkno);
			eb->h_fs_generation = cpu_to_le32(osb->fs_generation);
			eb->h_suballoc_slot = cpu_to_le16(osb->slot_num);
			eb->h_suballoc_bit = cpu_to_le16(suballoc_bit_start);
			eb->h_list.l_count =
				cpu_to_le16(ocfs2_extent_recs_per_eb(osb->sb));

			suballoc_bit_start++;
			first_blkno++;

			/* We'll also be dirtied by the caller, so
			 * this isn't absolutely necessary. */
			status = ocfs2_journal_dirty(handle, bhs[i]);
			if (status < 0) {
				mlog_errno(status);
				goto bail;
			}
		}

		count += num_got;
	}

	status = 0;
bail:
	if (status < 0) {
		for(i = 0; i < wanted; i++) {
			if (bhs[i])
				brelse(bhs[i]);
			bhs[i] = NULL;
		}
	}
	mlog_exit(status);
	return status;
}

/*
 * Helper function for ocfs2_add_branch() and ocfs2_shift_tree_depth().
 *
 * Returns the sum of the rightmost extent rec logical offset and
 * cluster count.
 *
 * ocfs2_add_branch() uses this to determine what logical cluster
 * value should be populated into the leftmost new branch records.
 *
 * ocfs2_shift_tree_depth() uses this to determine the # clusters
 * value for the new topmost tree record.
 */
static inline u32 ocfs2_sum_rightmost_rec(struct ocfs2_extent_list *el)
{
	int i;

	i = le16_to_cpu(el->l_next_free_rec) - 1;

	return le32_to_cpu(el->l_recs[i].e_cpos) +
		ocfs2_rec_clusters(el, &el->l_recs[i]);
}
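
/*
 * Worked example (illustrative): if the rightmost used record has
 * e_cpos == 100 and covers 20 clusters, ocfs2_sum_rightmost_rec()
 * returns 120 -- the first logical cluster *past* the current tree,
 * which is exactly the cpos a newly added branch or new tree root
 * record should start at.
 */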

/*
 * Add an entire tree branch to our inode. eb_bh is the extent block
 * to start at, if we don't want to start the branch at the dinode
 * structure.
 *
 * last_eb_bh is required as we have to update its next_leaf pointer
 * for the new last extent block.
 *
 * the new branch will be 'empty' in the sense that every block will
 * contain a single record with cluster count == 0.
 */
static int ocfs2_add_branch(struct ocfs2_super *osb,
			    handle_t *handle,
			    struct inode *inode,
			    struct ocfs2_extent_tree *et,
			    struct buffer_head *eb_bh,
			    struct buffer_head **last_eb_bh,
			    struct ocfs2_alloc_context *meta_ac)
{
	int status, new_blocks, i;
	u64 next_blkno, new_last_eb_blk;
	struct buffer_head *bh;
	struct buffer_head **new_eb_bhs = NULL;
	struct ocfs2_extent_block *eb;
	struct ocfs2_extent_list  *eb_el;
	struct ocfs2_extent_list  *el;
	u32 new_cpos;

	mlog_entry_void();

	BUG_ON(!last_eb_bh || !*last_eb_bh);

	if (eb_bh) {
		eb = (struct ocfs2_extent_block *) eb_bh->b_data;
		el = &eb->h_list;
	} else
		el = et->et_root_el;

	/* we never add a branch to a leaf. */
	BUG_ON(!el->l_tree_depth);

	new_blocks = le16_to_cpu(el->l_tree_depth);

	/* allocate the number of new eb blocks we need */
	new_eb_bhs = kcalloc(new_blocks, sizeof(struct buffer_head *),
			     GFP_KERNEL);
	if (!new_eb_bhs) {
		status = -ENOMEM;
		mlog_errno(status);
		goto bail;
	}

	status = ocfs2_create_new_meta_bhs(osb, handle, inode, new_blocks,
					   meta_ac, new_eb_bhs);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	eb = (struct ocfs2_extent_block *)(*last_eb_bh)->b_data;
	new_cpos = ocfs2_sum_rightmost_rec(&eb->h_list);

	/* Note: new_eb_bhs[new_blocks - 1] is the guy which will be
	 * linked with the rest of the tree.
	 * conversely, new_eb_bhs[0] is the new bottommost leaf.
	 *
	 * when we leave the loop, new_last_eb_blk will point to the
	 * newest leaf, and next_blkno will point to the topmost extent
	 * block. */
	next_blkno = new_last_eb_blk = 0;
	for(i = 0; i < new_blocks; i++) {
		bh = new_eb_bhs[i];
		eb = (struct ocfs2_extent_block *) bh->b_data;
		if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) {
			OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb);
			status = -EIO;
			goto bail;
		}
		eb_el = &eb->h_list;

		status = ocfs2_journal_access(handle, inode, bh,
					      OCFS2_JOURNAL_ACCESS_CREATE);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}

		eb->h_next_leaf_blk = 0;
		eb_el->l_tree_depth = cpu_to_le16(i);
		eb_el->l_next_free_rec = cpu_to_le16(1);
		/*
		 * This actually counts as an empty extent as
		 * c_clusters == 0
		 */
		eb_el->l_recs[0].e_cpos = cpu_to_le32(new_cpos);
		eb_el->l_recs[0].e_blkno = cpu_to_le64(next_blkno);
		/*
		 * eb_el isn't always an interior node, but even leaf
		 * nodes want a zero'd flags and reserved field so
		 * this gets the whole 32 bits regardless of use.
		 */
		eb_el->l_recs[0].e_int_clusters = cpu_to_le32(0);
		if (!eb_el->l_tree_depth)
			new_last_eb_blk = le64_to_cpu(eb->h_blkno);

		status = ocfs2_journal_dirty(handle, bh);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}

		next_blkno = le64_to_cpu(eb->h_blkno);
	}

	/* This is a bit hairy. We want to update up to three blocks
	 * here without leaving any of them in an inconsistent state
	 * in case of error. We don't have to worry about
	 * journal_dirty erroring as it won't unless we've aborted the
	 * handle (in which case we would never be here) so reserving
	 * the write with journal_access is all we need to do. */
	status = ocfs2_journal_access(handle, inode, *last_eb_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}
	status = ocfs2_journal_access(handle, inode, et->et_root_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}
	if (eb_bh) {
		status = ocfs2_journal_access(handle, inode, eb_bh,
					      OCFS2_JOURNAL_ACCESS_WRITE);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}
	}

	/* Link the new branch into the rest of the tree (el will
	 * either be on the root_bh, or the extent block passed in). */
	i = le16_to_cpu(el->l_next_free_rec);
	el->l_recs[i].e_blkno = cpu_to_le64(next_blkno);
	el->l_recs[i].e_cpos = cpu_to_le32(new_cpos);
	el->l_recs[i].e_int_clusters = 0;
	le16_add_cpu(&el->l_next_free_rec, 1);

	/* fe needs a new last extent block pointer, as does the
	 * next_leaf on the previously last-extent-block. */
	ocfs2_et_set_last_eb_blk(et, new_last_eb_blk);

	eb = (struct ocfs2_extent_block *) (*last_eb_bh)->b_data;
	eb->h_next_leaf_blk = cpu_to_le64(new_last_eb_blk);

	status = ocfs2_journal_dirty(handle, *last_eb_bh);
	if (status < 0)
		mlog_errno(status);
	status = ocfs2_journal_dirty(handle, et->et_root_bh);
	if (status < 0)
		mlog_errno(status);
	if (eb_bh) {
		status = ocfs2_journal_dirty(handle, eb_bh);
		if (status < 0)
			mlog_errno(status);
	}

	/*
	 * Some callers want to track the rightmost leaf so pass it
	 * back here.
	 */
	brelse(*last_eb_bh);
	get_bh(new_eb_bhs[0]);
	*last_eb_bh = new_eb_bhs[0];

	status = 0;
bail:
	if (new_eb_bhs) {
		for (i = 0; i < new_blocks; i++)
			if (new_eb_bhs[i])
				brelse(new_eb_bhs[i]);
		kfree(new_eb_bhs);
	}

	mlog_exit(status);
	return status;
}

/*
 * adds another level to the allocation tree.
 * returns back the new extent block so you can add a branch to it
 * after this call.
 */
static int ocfs2_shift_tree_depth(struct ocfs2_super *osb,
				  handle_t *handle,
				  struct inode *inode,
				  struct ocfs2_extent_tree *et,
				  struct ocfs2_alloc_context *meta_ac,
				  struct buffer_head **ret_new_eb_bh)
{
	int status, i;
	u32 new_clusters;
	struct buffer_head *new_eb_bh = NULL;
	struct ocfs2_extent_block *eb;
	struct ocfs2_extent_list  *root_el;
	struct ocfs2_extent_list  *eb_el;

	mlog_entry_void();

	status = ocfs2_create_new_meta_bhs(osb, handle, inode, 1, meta_ac,
					   &new_eb_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	eb = (struct ocfs2_extent_block *) new_eb_bh->b_data;
	if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) {
		OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb);
		status = -EIO;
		goto bail;
	}

	eb_el = &eb->h_list;
	root_el = et->et_root_el;

	status = ocfs2_journal_access(handle, inode, new_eb_bh,
				      OCFS2_JOURNAL_ACCESS_CREATE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	/* copy the root extent list data into the new extent block */
	eb_el->l_tree_depth = root_el->l_tree_depth;
	eb_el->l_next_free_rec = root_el->l_next_free_rec;
	for (i = 0; i < le16_to_cpu(root_el->l_next_free_rec); i++)
		eb_el->l_recs[i] = root_el->l_recs[i];

	status = ocfs2_journal_dirty(handle, new_eb_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	status = ocfs2_journal_access(handle, inode, et->et_root_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	new_clusters = ocfs2_sum_rightmost_rec(eb_el);

	/* update root_bh now */
	le16_add_cpu(&root_el->l_tree_depth, 1);
	root_el->l_recs[0].e_cpos = 0;
	root_el->l_recs[0].e_blkno = eb->h_blkno;
	root_el->l_recs[0].e_int_clusters = cpu_to_le32(new_clusters);
	for (i = 1; i < le16_to_cpu(root_el->l_next_free_rec); i++)
		memset(&root_el->l_recs[i], 0, sizeof(struct ocfs2_extent_rec));
	root_el->l_next_free_rec = cpu_to_le16(1);

	/* If this is our 1st tree depth shift, then last_eb_blk
	 * becomes the allocated extent block */
	if (root_el->l_tree_depth == cpu_to_le16(1))
		ocfs2_et_set_last_eb_blk(et, le64_to_cpu(eb->h_blkno));

	status = ocfs2_journal_dirty(handle, et->et_root_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	*ret_new_eb_bh = new_eb_bh;
	new_eb_bh = NULL;
	status = 0;
bail:
	if (new_eb_bh)
		brelse(new_eb_bh);

	mlog_exit(status);
	return status;
}
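
/*
 * For illustration, shifting a depth-0 tree: the root's leaf records are
 * copied verbatim into a freshly allocated extent block, the root is then
 * rewritten to hold a single interior record pointing at that block (cpos
 * 0, e_int_clusters covering the whole old range), and l_tree_depth goes
 * from 0 to 1. Only on that first shift does last_eb_blk change, because
 * the new block is then also the rightmost leaf.
 */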

/*
 * Should only be called when there is no space left in any of the
 * leaf nodes. What we want to do is find the lowest tree depth
 * non-leaf extent block with room for new records. There are three
 * valid results of this search:
 *
 * 1) a lowest extent block is found, then we pass it back in
 *    *lowest_eb_bh and return '0'
 *
 * 2) the search fails to find anything, but the root_el has room. We
 *    pass NULL back in *lowest_eb_bh, but still return '0'
 *
 * 3) the search fails to find anything AND the root_el is full, in
 *    which case we return > 0
 *
 * return status < 0 indicates an error.
 */
static int ocfs2_find_branch_target(struct ocfs2_super *osb,
				    struct inode *inode,
				    struct ocfs2_extent_tree *et,
				    struct buffer_head **target_bh)
{
	int status = 0, i;
	u64 blkno;
	struct ocfs2_extent_block *eb;
	struct ocfs2_extent_list  *el;
	struct buffer_head *bh = NULL;
	struct buffer_head *lowest_bh = NULL;

	mlog_entry_void();

	*target_bh = NULL;

	el = et->et_root_el;

	while(le16_to_cpu(el->l_tree_depth) > 1) {
		if (le16_to_cpu(el->l_next_free_rec) == 0) {
			ocfs2_error(inode->i_sb, "Dinode %llu has empty "
				    "extent list (next_free_rec == 0)",
				    (unsigned long long)OCFS2_I(inode)->ip_blkno);
			status = -EIO;
			goto bail;
		}
		i = le16_to_cpu(el->l_next_free_rec) - 1;
		blkno = le64_to_cpu(el->l_recs[i].e_blkno);
		if (!blkno) {
			ocfs2_error(inode->i_sb, "Dinode %llu has extent "
				    "list where extent # %d has no physical "
				    "block start",
				    (unsigned long long)OCFS2_I(inode)->ip_blkno, i);
			status = -EIO;
			goto bail;
		}

		if (bh) {
			brelse(bh);
			bh = NULL;
		}

		status = ocfs2_read_block(osb, blkno, &bh, OCFS2_BH_CACHED,
					  inode);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}

		eb = (struct ocfs2_extent_block *) bh->b_data;
		if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) {
			OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb);
			status = -EIO;
			goto bail;
		}
		el = &eb->h_list;

		if (le16_to_cpu(el->l_next_free_rec) <
		    le16_to_cpu(el->l_count)) {
			if (lowest_bh)
				brelse(lowest_bh);
			lowest_bh = bh;
			get_bh(lowest_bh);
		}
	}

	/* If we didn't find one and the fe doesn't have any room,
	 * then return '1' */
	el = et->et_root_el;
	if (!lowest_bh && (el->l_next_free_rec == el->l_count))
		status = 1;

	*target_bh = lowest_bh;
bail:
	if (bh)
		brelse(bh);

	mlog_exit(status);
	return status;
}
1148
Mark Fashehe48edee2007-03-07 16:46:57 -08001149/*
Mark Fashehc3afcbb2007-05-29 14:28:51 -07001150 * Grow a b-tree so that it has more records.
1151 *
1152 * We might shift the tree depth in which case existing paths should
1153 * be considered invalid.
1154 *
1155 * Tree depth after the grow is returned via *final_depth.
Mark Fasheh328d5752007-06-18 10:48:04 -07001156 *
1157 * *last_eb_bh will be updated by ocfs2_add_branch().
Mark Fashehc3afcbb2007-05-29 14:28:51 -07001158 */
1159static int ocfs2_grow_tree(struct inode *inode, handle_t *handle,
Tao Mae7d4cb62008-08-18 17:38:44 +08001160 struct ocfs2_extent_tree *et, int *final_depth,
Mark Fasheh328d5752007-06-18 10:48:04 -07001161 struct buffer_head **last_eb_bh,
Mark Fashehc3afcbb2007-05-29 14:28:51 -07001162 struct ocfs2_alloc_context *meta_ac)
1163{
1164 int ret, shift;
Joel Beckerce1d9ea2008-08-20 16:30:07 -07001165 struct ocfs2_extent_list *el = et->et_root_el;
Tao Mae7d4cb62008-08-18 17:38:44 +08001166 int depth = le16_to_cpu(el->l_tree_depth);
Mark Fashehc3afcbb2007-05-29 14:28:51 -07001167 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1168 struct buffer_head *bh = NULL;
1169
1170 BUG_ON(meta_ac == NULL);
1171
Tao Mae7d4cb62008-08-18 17:38:44 +08001172 shift = ocfs2_find_branch_target(osb, inode, et, &bh);
Mark Fashehc3afcbb2007-05-29 14:28:51 -07001173 if (shift < 0) {
1174 ret = shift;
1175 mlog_errno(ret);
1176 goto out;
1177 }
1178
1179 /* We traveled all the way to the bottom of the allocation tree
1180 * and didn't find room for any more extents - we need to add
1181 * another tree level */
1182 if (shift) {
1183 BUG_ON(bh);
1184 mlog(0, "need to shift tree depth (current = %d)\n", depth);
1185
1186 /* ocfs2_shift_tree_depth will return us a buffer with
1187 * the new extent block (so we can pass that to
1188 * ocfs2_add_branch). */
Tao Mae7d4cb62008-08-18 17:38:44 +08001189 ret = ocfs2_shift_tree_depth(osb, handle, inode, et,
Mark Fashehc3afcbb2007-05-29 14:28:51 -07001190 meta_ac, &bh);
1191 if (ret < 0) {
1192 mlog_errno(ret);
1193 goto out;
1194 }
1195 depth++;
Mark Fasheh328d5752007-06-18 10:48:04 -07001196 if (depth == 1) {
1197 /*
1198 * Special case: we have room now if we shifted from
1199 * tree_depth 0, so no more work needs to be done.
1200 *
1201 * We won't be calling add_branch, so pass
1202 * back *last_eb_bh as the new leaf. At depth
1203 * zero, it should always be null so there's
1204 * no reason to brelse.
1205 */
1206 BUG_ON(*last_eb_bh);
1207 get_bh(bh);
1208 *last_eb_bh = bh;
Mark Fashehc3afcbb2007-05-29 14:28:51 -07001209 goto out;
Mark Fasheh328d5752007-06-18 10:48:04 -07001210 }
Mark Fashehc3afcbb2007-05-29 14:28:51 -07001211 }
1212
1213 /* call ocfs2_add_branch to add the final part of the tree with
1214 * the new data. */
1215 mlog(0, "add branch. bh = %p\n", bh);
Tao Mae7d4cb62008-08-18 17:38:44 +08001216 ret = ocfs2_add_branch(osb, handle, inode, et, bh, last_eb_bh,
Mark Fashehc3afcbb2007-05-29 14:28:51 -07001217 meta_ac);
1218 if (ret < 0) {
1219 mlog_errno(ret);
1220 goto out;
1221 }
1222
1223out:
1224 if (final_depth)
1225 *final_depth = depth;
1226 brelse(bh);
1227 return ret;
1228}
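
/*
 * Rough flow of a grow (illustrative): ocfs2_find_branch_target() looks
 * for the lowest interior block with a free record. If it finds one, a new
 * branch is simply hung off that block via ocfs2_add_branch(); if nothing
 * has room, ocfs2_shift_tree_depth() first adds a new level above the
 * root, and then the branch is added (except in the depth 0 -> 1 case,
 * where the shift alone already makes room).
 */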

/*
 * Shift every record in the list right by one slot, opening a hole at
 * index 0. The caller must guarantee that there is room
 * (next_free < count), so the rightmost record is never pushed off the
 * end of the list.
 */
static void ocfs2_shift_records_right(struct ocfs2_extent_list *el)
{
	int next_free = le16_to_cpu(el->l_next_free_rec);
	int count = le16_to_cpu(el->l_count);
	unsigned int num_bytes;

	BUG_ON(!next_free);
	/* This will cause us to go off the end of our extent list. */
	BUG_ON(next_free >= count);

	num_bytes = sizeof(struct ocfs2_extent_rec) * next_free;

	memmove(&el->l_recs[1], &el->l_recs[0], num_bytes);
}

static void ocfs2_rotate_leaf(struct ocfs2_extent_list *el,
			      struct ocfs2_extent_rec *insert_rec)
{
	int i, insert_index, next_free, has_empty, num_bytes;
	u32 insert_cpos = le32_to_cpu(insert_rec->e_cpos);
	struct ocfs2_extent_rec *rec;

	next_free = le16_to_cpu(el->l_next_free_rec);
	has_empty = ocfs2_is_empty_extent(&el->l_recs[0]);

	BUG_ON(!next_free);

	/* The tree code before us didn't allow enough room in the leaf. */
	BUG_ON(el->l_next_free_rec == el->l_count && !has_empty);

	/*
	 * The easiest way to approach this is to just remove the
	 * empty extent and temporarily decrement next_free.
	 */
	if (has_empty) {
		/*
		 * If next_free was 1 (only an empty extent), this
		 * loop won't execute, which is fine. We still want
		 * the decrement below to happen.
		 */
		for(i = 0; i < (next_free - 1); i++)
			el->l_recs[i] = el->l_recs[i+1];

		next_free--;
	}

	/*
	 * Figure out what the new record index should be.
	 */
	for(i = 0; i < next_free; i++) {
		rec = &el->l_recs[i];

		if (insert_cpos < le32_to_cpu(rec->e_cpos))
			break;
	}
	insert_index = i;

	mlog(0, "ins %u: index %d, has_empty %d, next_free %d, count %d\n",
	     insert_cpos, insert_index, has_empty, next_free, le16_to_cpu(el->l_count));

	BUG_ON(insert_index < 0);
	BUG_ON(insert_index >= le16_to_cpu(el->l_count));
	BUG_ON(insert_index > next_free);

	/*
	 * No need to memmove if we're just adding to the tail.
	 */
	if (insert_index != next_free) {
		BUG_ON(next_free >= le16_to_cpu(el->l_count));

		num_bytes = next_free - insert_index;
		num_bytes *= sizeof(struct ocfs2_extent_rec);
		memmove(&el->l_recs[insert_index + 1],
			&el->l_recs[insert_index],
			num_bytes);
	}

	/*
	 * Either we had an empty extent, and need to re-increment or
	 * there was no empty extent on a non full rightmost leaf node,
	 * in which case we still need to increment.
	 */
	next_free++;
	el->l_next_free_rec = cpu_to_le16(next_free);
	/*
	 * Make sure none of the math above just messed up our tree.
	 */
	BUG_ON(le16_to_cpu(el->l_next_free_rec) > le16_to_cpu(el->l_count));

	el->l_recs[insert_index] = *insert_rec;

}
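
/*
 * Example of a leaf rotation (illustrative): with records at cpos 0
 * (empty), 8 and 16, inserting a record at cpos 12 first drops the empty
 * record at index 0, then memmoves the cpos-16 record right and places
 * the new record between the two, leaving the list sorted by cpos with
 * no gaps. The pre-existing empty extent is what guarantees there is room
 * even when the leaf was otherwise full.
 */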

static void ocfs2_remove_empty_extent(struct ocfs2_extent_list *el)
{
	int size, num_recs = le16_to_cpu(el->l_next_free_rec);

	BUG_ON(num_recs == 0);

	if (ocfs2_is_empty_extent(&el->l_recs[0])) {
		num_recs--;
		size = num_recs * sizeof(struct ocfs2_extent_rec);
		memmove(&el->l_recs[0], &el->l_recs[1], size);
		memset(&el->l_recs[num_recs], 0,
		       sizeof(struct ocfs2_extent_rec));
		el->l_next_free_rec = cpu_to_le16(num_recs);
	}
}

/*
 * Create an empty extent record.
 *
 * l_next_free_rec may be updated.
 *
 * If an empty extent already exists, do nothing.
 */
static void ocfs2_create_empty_extent(struct ocfs2_extent_list *el)
{
	int next_free = le16_to_cpu(el->l_next_free_rec);

	BUG_ON(le16_to_cpu(el->l_tree_depth) != 0);

	if (next_free == 0)
		goto set_and_inc;

	if (ocfs2_is_empty_extent(&el->l_recs[0]))
		return;

	mlog_bug_on_msg(el->l_count == el->l_next_free_rec,
			"Asked to create an empty extent in a full list:\n"
			"count = %u, tree depth = %u",
			le16_to_cpu(el->l_count),
			le16_to_cpu(el->l_tree_depth));

	ocfs2_shift_records_right(el);

set_and_inc:
	le16_add_cpu(&el->l_next_free_rec, 1);
	memset(&el->l_recs[0], 0, sizeof(struct ocfs2_extent_rec));
}

/*
 * For a rotation which involves two leaf nodes, the "root node" is
 * the lowest level tree node which contains a path to both leaves. The
 * resulting set of information can be used to form a complete "subtree".
 *
 * This function is passed two full paths from the dinode down to a
 * pair of adjacent leaves. Its task is to figure out which path
 * index contains the subtree root - this can be the root index itself
 * in a worst-case rotation.
 *
 * The array index of the subtree root is passed back.
 */
static int ocfs2_find_subtree_root(struct inode *inode,
				   struct ocfs2_path *left,
				   struct ocfs2_path *right)
{
	int i = 0;

	/*
	 * Check that the caller passed in two paths from the same tree.
	 */
	BUG_ON(path_root_bh(left) != path_root_bh(right));

	do {
		i++;

		/*
		 * The caller didn't pass two adjacent paths.
		 */
		mlog_bug_on_msg(i > left->p_tree_depth,
				"Inode %lu, left depth %u, right depth %u\n"
				"left leaf blk %llu, right leaf blk %llu\n",
				inode->i_ino, left->p_tree_depth,
				right->p_tree_depth,
				(unsigned long long)path_leaf_bh(left)->b_blocknr,
				(unsigned long long)path_leaf_bh(right)->b_blocknr);
	} while (left->p_node[i].bh->b_blocknr ==
		 right->p_node[i].bh->b_blocknr);

	return i - 1;
}

typedef void (path_insert_t)(void *, struct buffer_head *);

/*
 * Traverse a btree path in search of cpos, starting at root_el.
 *
 * This code can be called with a cpos larger than the tree, in which
 * case it will return the rightmost path.
 */
static int __ocfs2_find_path(struct inode *inode,
			     struct ocfs2_extent_list *root_el, u32 cpos,
			     path_insert_t *func, void *data)
{
	int i, ret = 0;
	u32 range;
	u64 blkno;
	struct buffer_head *bh = NULL;
	struct ocfs2_extent_block *eb;
	struct ocfs2_extent_list *el;
	struct ocfs2_extent_rec *rec;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	el = root_el;
	while (el->l_tree_depth) {
		if (le16_to_cpu(el->l_next_free_rec) == 0) {
			ocfs2_error(inode->i_sb,
				    "Inode %llu has empty extent list at "
				    "depth %u\n",
				    (unsigned long long)oi->ip_blkno,
				    le16_to_cpu(el->l_tree_depth));
			ret = -EROFS;
			goto out;

		}

		for(i = 0; i < le16_to_cpu(el->l_next_free_rec) - 1; i++) {
			rec = &el->l_recs[i];

			/*
			 * In the case that cpos is off the allocation
			 * tree, this should just wind up returning the
			 * rightmost record.
			 */
			range = le32_to_cpu(rec->e_cpos) +
				ocfs2_rec_clusters(el, rec);
			if (cpos >= le32_to_cpu(rec->e_cpos) && cpos < range)
				break;
		}

		blkno = le64_to_cpu(el->l_recs[i].e_blkno);
		if (blkno == 0) {
			ocfs2_error(inode->i_sb,
				    "Inode %llu has bad blkno in extent list "
				    "at depth %u (index %d)\n",
				    (unsigned long long)oi->ip_blkno,
				    le16_to_cpu(el->l_tree_depth), i);
			ret = -EROFS;
			goto out;
		}

		brelse(bh);
		bh = NULL;
		ret = ocfs2_read_block(OCFS2_SB(inode->i_sb), blkno,
				       &bh, OCFS2_BH_CACHED, inode);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		eb = (struct ocfs2_extent_block *) bh->b_data;
		el = &eb->h_list;
		if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) {
			OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb);
			ret = -EIO;
			goto out;
		}

		if (le16_to_cpu(el->l_next_free_rec) >
		    le16_to_cpu(el->l_count)) {
			ocfs2_error(inode->i_sb,
				    "Inode %llu has bad count in extent list "
				    "at block %llu (next free=%u, count=%u)\n",
				    (unsigned long long)oi->ip_blkno,
				    (unsigned long long)bh->b_blocknr,
				    le16_to_cpu(el->l_next_free_rec),
				    le16_to_cpu(el->l_count));
			ret = -EROFS;
			goto out;
		}

		if (func)
			func(data, bh);
	}

out:
	/*
	 * Catch any trailing bh that the loop didn't handle.
	 */
	brelse(bh);

	return ret;
}

/*
 * Given an initialized path (that is, it has a valid root extent
 * list), this function will traverse the btree in search of the path
 * which would contain cpos.
 *
 * The path traveled is recorded in the path structure.
 *
 * Note that this will not do any comparisons on leaf node extent
 * records, so it will work fine in the case that we just added a tree
 * branch.
 */
struct find_path_data {
	int index;
	struct ocfs2_path *path;
};
static void find_path_ins(void *data, struct buffer_head *bh)
{
	struct find_path_data *fp = data;

	get_bh(bh);
	ocfs2_path_insert_eb(fp->path, fp->index, bh);
	fp->index++;
}
static int ocfs2_find_path(struct inode *inode, struct ocfs2_path *path,
			   u32 cpos)
{
	struct find_path_data data;

	data.index = 1;
	data.path = path;
	return __ocfs2_find_path(inode, path_root_el(path), cpos,
				 find_path_ins, &data);
}

static void find_leaf_ins(void *data, struct buffer_head *bh)
{
	struct ocfs2_extent_block *eb = (struct ocfs2_extent_block *)bh->b_data;
	struct ocfs2_extent_list *el = &eb->h_list;
	struct buffer_head **ret = data;

	/* We want to retain only the leaf block. */
	if (le16_to_cpu(el->l_tree_depth) == 0) {
		get_bh(bh);
		*ret = bh;
	}
}
/*
 * Find the leaf block in the tree which would contain cpos. No
 * checking of the actual leaf is done.
 *
 * Some paths want to call this instead of allocating a path structure
 * and calling ocfs2_find_path().
 *
 * This function doesn't handle non btree extent lists.
 */
int ocfs2_find_leaf(struct inode *inode, struct ocfs2_extent_list *root_el,
		    u32 cpos, struct buffer_head **leaf_bh)
{
	int ret;
	struct buffer_head *bh = NULL;

	ret = __ocfs2_find_path(inode, root_el, cpos, find_leaf_ins, &bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	*leaf_bh = bh;
out:
	return ret;
}
1589
1590/*
1591 * Adjust the adjacent records (left_rec, right_rec) involved in a rotation.
1592 *
1593 * Basically, we've moved stuff around at the bottom of the tree and
1594 * we need to fix up the extent records above the changes to reflect
1595 * the new changes.
1596 *
1597 * left_rec: the record on the left.
1598 * left_child_el: is the child list pointed to by left_rec
1599 * right_rec: the record to the right of left_rec
1600 * right_child_el: is the child list pointed to by right_rec
1601 *
1602 * By definition, this only works on interior nodes.
1603 */
1604static void ocfs2_adjust_adjacent_records(struct ocfs2_extent_rec *left_rec,
1605 struct ocfs2_extent_list *left_child_el,
1606 struct ocfs2_extent_rec *right_rec,
1607 struct ocfs2_extent_list *right_child_el)
1608{
1609 u32 left_clusters, right_end;
1610
1611 /*
1612 * Interior nodes never have holes. Their cpos is the cpos of
1613 * the leftmost record in their child list. Their cluster
1614 * count covers the full theoretical range of their child list
1615 * - the range between their cpos and the cpos of the record
1616 * immediately to their right.
1617 */
1618 left_clusters = le32_to_cpu(right_child_el->l_recs[0].e_cpos);
Mark Fasheh328d5752007-06-18 10:48:04 -07001619 if (ocfs2_is_empty_extent(&right_child_el->l_recs[0])) {
1620 BUG_ON(le16_to_cpu(right_child_el->l_next_free_rec) <= 1);
1621 left_clusters = le32_to_cpu(right_child_el->l_recs[1].e_cpos);
1622 }
Mark Fashehdcd05382007-01-16 11:32:23 -08001623 left_clusters -= le32_to_cpu(left_rec->e_cpos);
Mark Fashehe48edee2007-03-07 16:46:57 -08001624 left_rec->e_int_clusters = cpu_to_le32(left_clusters);
Mark Fashehdcd05382007-01-16 11:32:23 -08001625
1626 /*
1627 * Calculate the rightmost cluster count boundary before
Mark Fashehe48edee2007-03-07 16:46:57 -08001628 * moving cpos - we will need to adjust clusters after
Mark Fashehdcd05382007-01-16 11:32:23 -08001629 * updating e_cpos to keep the same highest cluster count.
1630 */
1631 right_end = le32_to_cpu(right_rec->e_cpos);
Mark Fashehe48edee2007-03-07 16:46:57 -08001632 right_end += le32_to_cpu(right_rec->e_int_clusters);
Mark Fashehdcd05382007-01-16 11:32:23 -08001633
1634 right_rec->e_cpos = left_rec->e_cpos;
1635 le32_add_cpu(&right_rec->e_cpos, left_clusters);
1636
1637 right_end -= le32_to_cpu(right_rec->e_cpos);
Mark Fashehe48edee2007-03-07 16:46:57 -08001638 right_rec->e_int_clusters = cpu_to_le32(right_end);
Mark Fashehdcd05382007-01-16 11:32:23 -08001639}
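/*
 * A worked example with made-up numbers for the arithmetic in
 * ocfs2_adjust_adjacent_records().  Suppose left_rec starts at cpos 0,
 * the right child's first non-empty record now starts at cpos 90, and
 * right_rec previously covered cpos 100 for 50 clusters:
 *
 *	left_clusters            = 90 - 0   = 90  (new left_rec->e_int_clusters)
 *	right_end                = 100 + 50 = 150 (old right edge, preserved)
 *	right_rec->e_cpos        = 0 + 90   = 90
 *	right_rec->e_int_clusters = 150 - 90 = 60
 *
 * The two interior records still cover [0, 150) back to back, which is
 * the "no holes" invariant described above.
 */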
1640
1641/*
1642 * Adjust the adjacent root node records involved in a
1643 * rotation. left_el_blkno is passed in as a key so that we can easily
1644 * find its index in the root list.
1645 */
1646static void ocfs2_adjust_root_records(struct ocfs2_extent_list *root_el,
1647 struct ocfs2_extent_list *left_el,
1648 struct ocfs2_extent_list *right_el,
1649 u64 left_el_blkno)
1650{
1651 int i;
1652
1653 BUG_ON(le16_to_cpu(root_el->l_tree_depth) <=
1654 le16_to_cpu(left_el->l_tree_depth));
1655
1656 for(i = 0; i < le16_to_cpu(root_el->l_next_free_rec) - 1; i++) {
1657 if (le64_to_cpu(root_el->l_recs[i].e_blkno) == left_el_blkno)
1658 break;
1659 }
1660
1661 /*
1662 * The path walking code should have never returned a root and
1663 * two paths which are not adjacent.
1664 */
1665 BUG_ON(i >= (le16_to_cpu(root_el->l_next_free_rec) - 1));
1666
1667 ocfs2_adjust_adjacent_records(&root_el->l_recs[i], left_el,
1668 &root_el->l_recs[i + 1], right_el);
1669}
1670
1671/*
1672 * We've changed a leaf block (in right_path) and need to reflect that
1673 * change back up the subtree.
1674 *
1675 * This happens in multiple places:
1676 * - When we've moved an extent record from the left path leaf to the right
1677 * path leaf to make room for an empty extent in the left path leaf.
1678 * - When our insert into the right path leaf is at the leftmost edge
1679 * and requires an update of the path immediately to its left. This
1680 * can occur at the end of some types of rotation and appending inserts.
Tao Ma677b9752008-01-30 14:21:05 +08001681 * - When we've adjusted the last extent record in the left path leaf and the
1682 * 1st extent record in the right path leaf during cross extent block merge.
Mark Fashehdcd05382007-01-16 11:32:23 -08001683 */
1684static void ocfs2_complete_edge_insert(struct inode *inode, handle_t *handle,
1685 struct ocfs2_path *left_path,
1686 struct ocfs2_path *right_path,
1687 int subtree_index)
1688{
1689 int ret, i, idx;
1690 struct ocfs2_extent_list *el, *left_el, *right_el;
1691 struct ocfs2_extent_rec *left_rec, *right_rec;
1692 struct buffer_head *root_bh = left_path->p_node[subtree_index].bh;
1693
1694 /*
1695 * Update the counts and position values within all the
1696 * interior nodes to reflect the leaf rotation we just did.
1697 *
1698 * The root node is handled below the loop.
1699 *
1700 * We begin the loop with right_el and left_el pointing to the
1701 * leaf lists and work our way up.
1702 *
1703 * NOTE: within this loop, left_el and right_el always refer
1704 * to the *child* lists.
1705 */
1706 left_el = path_leaf_el(left_path);
1707 right_el = path_leaf_el(right_path);
1708 for(i = left_path->p_tree_depth - 1; i > subtree_index; i--) {
1709 mlog(0, "Adjust records at index %u\n", i);
1710
1711 /*
1712 * One nice property of knowing that all of these
1713 * nodes are below the root is that we only deal with
1714 * the leftmost right node record and the rightmost
1715 * left node record.
1716 */
1717 el = left_path->p_node[i].el;
1718 idx = le16_to_cpu(left_el->l_next_free_rec) - 1;
1719 left_rec = &el->l_recs[idx];
1720
1721 el = right_path->p_node[i].el;
1722 right_rec = &el->l_recs[0];
1723
1724 ocfs2_adjust_adjacent_records(left_rec, left_el, right_rec,
1725 right_el);
1726
1727 ret = ocfs2_journal_dirty(handle, left_path->p_node[i].bh);
1728 if (ret)
1729 mlog_errno(ret);
1730
1731 ret = ocfs2_journal_dirty(handle, right_path->p_node[i].bh);
1732 if (ret)
1733 mlog_errno(ret);
1734
1735 /*
1736 * Setup our list pointers now so that the current
1737 * parents become children in the next iteration.
1738 */
1739 left_el = left_path->p_node[i].el;
1740 right_el = right_path->p_node[i].el;
1741 }
1742
1743 /*
1744 * At the root node, adjust the two adjacent records which
1745 * begin our path to the leaves.
1746 */
1747
1748 el = left_path->p_node[subtree_index].el;
1749 left_el = left_path->p_node[subtree_index + 1].el;
1750 right_el = right_path->p_node[subtree_index + 1].el;
1751
1752 ocfs2_adjust_root_records(el, left_el, right_el,
1753 left_path->p_node[subtree_index + 1].bh->b_blocknr);
1754
1755 root_bh = left_path->p_node[subtree_index].bh;
1756
1757 ret = ocfs2_journal_dirty(handle, root_bh);
1758 if (ret)
1759 mlog_errno(ret);
1760}
1761
1762static int ocfs2_rotate_subtree_right(struct inode *inode,
1763 handle_t *handle,
1764 struct ocfs2_path *left_path,
1765 struct ocfs2_path *right_path,
1766 int subtree_index)
1767{
1768 int ret, i;
1769 struct buffer_head *right_leaf_bh;
1770 struct buffer_head *left_leaf_bh = NULL;
1771 struct buffer_head *root_bh;
1772 struct ocfs2_extent_list *right_el, *left_el;
1773 struct ocfs2_extent_rec move_rec;
1774
1775 left_leaf_bh = path_leaf_bh(left_path);
1776 left_el = path_leaf_el(left_path);
1777
1778 if (left_el->l_next_free_rec != left_el->l_count) {
1779 ocfs2_error(inode->i_sb,
1780 "Inode %llu has non-full interior leaf node %llu"
1781 "Inode %llu has non-full interior leaf node %llu "
1782 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1783 (unsigned long long)left_leaf_bh->b_blocknr,
1784 le16_to_cpu(left_el->l_next_free_rec));
1785 return -EROFS;
1786 }
1787
1788 /*
1789 * This extent block may already have an empty record, so we
1790 * return early if so.
1791 */
1792 if (ocfs2_is_empty_extent(&left_el->l_recs[0]))
1793 return 0;
1794
1795 root_bh = left_path->p_node[subtree_index].bh;
1796 BUG_ON(root_bh != right_path->p_node[subtree_index].bh);
1797
1798 ret = ocfs2_journal_access(handle, inode, root_bh,
1799 OCFS2_JOURNAL_ACCESS_WRITE);
1800 if (ret) {
1801 mlog_errno(ret);
1802 goto out;
1803 }
1804
1805 for(i = subtree_index + 1; i < path_num_items(right_path); i++) {
1806 ret = ocfs2_journal_access(handle, inode,
1807 right_path->p_node[i].bh,
1808 OCFS2_JOURNAL_ACCESS_WRITE);
1809 if (ret) {
1810 mlog_errno(ret);
1811 goto out;
1812 }
1813
1814 ret = ocfs2_journal_access(handle, inode,
1815 left_path->p_node[i].bh,
1816 OCFS2_JOURNAL_ACCESS_WRITE);
1817 if (ret) {
1818 mlog_errno(ret);
1819 goto out;
1820 }
1821 }
1822
1823 right_leaf_bh = path_leaf_bh(right_path);
1824 right_el = path_leaf_el(right_path);
1825
1826 /* This is a code error, not a disk corruption. */
1827 mlog_bug_on_msg(!right_el->l_next_free_rec, "Inode %llu: Rotate fails "
1828 "because rightmost leaf block %llu is empty\n",
1829 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1830 (unsigned long long)right_leaf_bh->b_blocknr);
1831
1832 ocfs2_create_empty_extent(right_el);
1833
1834 ret = ocfs2_journal_dirty(handle, right_leaf_bh);
1835 if (ret) {
1836 mlog_errno(ret);
1837 goto out;
1838 }
1839
1840 /* Do the copy now. */
1841 i = le16_to_cpu(left_el->l_next_free_rec) - 1;
1842 move_rec = left_el->l_recs[i];
1843 right_el->l_recs[0] = move_rec;
1844
1845 /*
1846 * Clear out the record we just copied and shift everything
1847 * over, leaving an empty extent in the left leaf.
1848 *
1849 * We temporarily subtract from next_free_rec so that the
1850 * shift will lose the tail record (which is now defunct).
1851 */
1852 le16_add_cpu(&left_el->l_next_free_rec, -1);
1853 ocfs2_shift_records_right(left_el);
1854 memset(&left_el->l_recs[0], 0, sizeof(struct ocfs2_extent_rec));
1855 le16_add_cpu(&left_el->l_next_free_rec, 1);
1856
1857 ret = ocfs2_journal_dirty(handle, left_leaf_bh);
1858 if (ret) {
1859 mlog_errno(ret);
1860 goto out;
1861 }
1862
1863 ocfs2_complete_edge_insert(inode, handle, left_path, right_path,
1864 subtree_index);
1865
1866out:
1867 return ret;
1868}
1869
1870/*
1871 * Given a full path, determine what cpos value would return us a path
1872 * containing the leaf immediately to the left of the current one.
1873 *
1874 * Will return zero if the path passed in is already the leftmost path.
1875 */
1876static int ocfs2_find_cpos_for_left_leaf(struct super_block *sb,
1877 struct ocfs2_path *path, u32 *cpos)
1878{
1879 int i, j, ret = 0;
1880 u64 blkno;
1881 struct ocfs2_extent_list *el;
1882
Mark Fashehe48edee2007-03-07 16:46:57 -08001883 BUG_ON(path->p_tree_depth == 0);
1884
Mark Fashehdcd05382007-01-16 11:32:23 -08001885 *cpos = 0;
1886
1887 blkno = path_leaf_bh(path)->b_blocknr;
1888
1889 /* Start at the tree node just above the leaf and work our way up. */
1890 i = path->p_tree_depth - 1;
1891 while (i >= 0) {
1892 el = path->p_node[i].el;
1893
1894 /*
1895 * Find the extent record just before the one in our
1896 * path.
1897 */
1898 for(j = 0; j < le16_to_cpu(el->l_next_free_rec); j++) {
1899 if (le64_to_cpu(el->l_recs[j].e_blkno) == blkno) {
1900 if (j == 0) {
1901 if (i == 0) {
1902 /*
1903 * We've determined that the
1904 * path specified is already
1905 * the leftmost one - return a
1906 * cpos of zero.
1907 */
1908 goto out;
1909 }
1910 /*
1911 * The leftmost record points to our
1912 * leaf - we need to travel up the
1913 * tree one level.
1914 */
1915 goto next_node;
1916 }
1917
1918 *cpos = le32_to_cpu(el->l_recs[j - 1].e_cpos);
Mark Fashehe48edee2007-03-07 16:46:57 -08001919 *cpos = *cpos + ocfs2_rec_clusters(el,
1920 &el->l_recs[j - 1]);
1921 *cpos = *cpos - 1;
Mark Fashehdcd05382007-01-16 11:32:23 -08001922 goto out;
1923 }
1924 }
1925
1926 /*
1927 * If we got here, we never found a valid node where
1928 * the tree indicated one should be.
1929 */
1930 ocfs2_error(sb,
1931 "Invalid extent tree at extent block %llu\n",
1932 (unsigned long long)blkno);
1933 ret = -EROFS;
1934 goto out;
1935
1936next_node:
1937 blkno = path->p_node[i].bh->b_blocknr;
1938 i--;
1939 }
1940
1941out:
1942 return ret;
1943}
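/*
 * A small worked example with made-up numbers for the cpos computed
 * above.  If the record to the left of ours in the parent starts at
 * cpos 16 and covers 8 clusters, then
 *
 *	*cpos = 16 + 8 - 1 = 23
 *
 * i.e. the last cluster offset covered by that neighbour.  Feeding
 * that value to ocfs2_find_path() lands in the leaf immediately to
 * our left.
 */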
1944
Mark Fasheh328d5752007-06-18 10:48:04 -07001945/*
1946 * Extend the transaction by enough credits to complete the rotation,
1947 * and still leave at least the original number of credits allocated
1948 * to this transaction.
1949 */
Mark Fashehdcd05382007-01-16 11:32:23 -08001950static int ocfs2_extend_rotate_transaction(handle_t *handle, int subtree_depth,
Mark Fasheh328d5752007-06-18 10:48:04 -07001951 int op_credits,
Mark Fashehdcd05382007-01-16 11:32:23 -08001952 struct ocfs2_path *path)
1953{
Mark Fasheh328d5752007-06-18 10:48:04 -07001954 int credits = (path->p_tree_depth - subtree_depth) * 2 + 1 + op_credits;
Mark Fashehdcd05382007-01-16 11:32:23 -08001955
1956 if (handle->h_buffer_credits < credits)
1957 return ocfs2_extend_trans(handle, credits);
1958
1959 return 0;
1960}
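/*
 * A worked example with made-up numbers for the credit calculation
 * above.  For a tree of depth 3, a subtree root at depth 1 and
 * op_credits == 0:
 *
 *	credits = (3 - 1) * 2 + 1 + 0 = 5
 *
 * which allows for one buffer per level on each of the two paths
 * below the subtree root, plus the subtree root itself.  The
 * transaction is only extended when handle->h_buffer_credits has
 * fallen below that total.
 */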
1961
1962/*
1963 * Trap the case where we're inserting into the theoretical range past
1964 * the _actual_ left leaf range. Otherwise, we'll rotate a record
1965 * whose cpos is less than ours into the right leaf.
1966 *
1967 * It's only necessary to look at the rightmost record of the left
1968 * leaf because the logic that calls us should ensure that the
1969 * theoretical ranges in the path components above the leaves are
1970 * correct.
1971 */
1972static int ocfs2_rotate_requires_path_adjustment(struct ocfs2_path *left_path,
1973 u32 insert_cpos)
1974{
1975 struct ocfs2_extent_list *left_el;
1976 struct ocfs2_extent_rec *rec;
1977 int next_free;
1978
1979 left_el = path_leaf_el(left_path);
1980 next_free = le16_to_cpu(left_el->l_next_free_rec);
1981 rec = &left_el->l_recs[next_free - 1];
1982
1983 if (insert_cpos > le32_to_cpu(rec->e_cpos))
1984 return 1;
1985 return 0;
1986}
1987
Mark Fasheh328d5752007-06-18 10:48:04 -07001988static int ocfs2_leftmost_rec_contains(struct ocfs2_extent_list *el, u32 cpos)
1989{
1990 int next_free = le16_to_cpu(el->l_next_free_rec);
1991 unsigned int range;
1992 struct ocfs2_extent_rec *rec;
1993
1994 if (next_free == 0)
1995 return 0;
1996
1997 rec = &el->l_recs[0];
1998 if (ocfs2_is_empty_extent(rec)) {
1999 /* Empty list. */
2000 if (next_free == 1)
2001 return 0;
2002 rec = &el->l_recs[1];
2003 }
2004
2005 range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);
2006 if (cpos >= le32_to_cpu(rec->e_cpos) && cpos < range)
2007 return 1;
2008 return 0;
2009}
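/*
 * An illustrative example with made-up numbers for
 * ocfs2_leftmost_rec_contains().  With a leftmost non-empty record at
 * cpos 8 covering 4 clusters, the covered range is [8, 12), so a cpos
 * of 8 through 11 returns 1 and a cpos of 12 returns 0.
 */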
2010
Mark Fashehdcd05382007-01-16 11:32:23 -08002011/*
2012 * Rotate all the records in a btree right one record, starting at insert_cpos.
2013 *
2014 * The path to the rightmost leaf should be passed in.
2015 *
2016 * The array is assumed to be large enough to hold an entire path (tree depth).
2017 *
2018 * Upon successful return from this function:
2019 *
2020 * - The 'right_path' array will contain a path to the leaf block
2021 * whose range contains insert_cpos.
2022 * - That leaf block will have a single empty extent in list index 0.
2023 * - In the case that the rotation requires a post-insert update,
2024 * *ret_left_path will contain a valid path which can be passed to
2025 * ocfs2_insert_path().
2026 */
2027static int ocfs2_rotate_tree_right(struct inode *inode,
2028 handle_t *handle,
Mark Fasheh328d5752007-06-18 10:48:04 -07002029 enum ocfs2_split_type split,
Mark Fashehdcd05382007-01-16 11:32:23 -08002030 u32 insert_cpos,
2031 struct ocfs2_path *right_path,
2032 struct ocfs2_path **ret_left_path)
2033{
Mark Fasheh328d5752007-06-18 10:48:04 -07002034 int ret, start, orig_credits = handle->h_buffer_credits;
Mark Fashehdcd05382007-01-16 11:32:23 -08002035 u32 cpos;
2036 struct ocfs2_path *left_path = NULL;
2037
2038 *ret_left_path = NULL;
2039
2040 left_path = ocfs2_new_path(path_root_bh(right_path),
2041 path_root_el(right_path));
2042 if (!left_path) {
2043 ret = -ENOMEM;
2044 mlog_errno(ret);
2045 goto out;
2046 }
2047
2048 ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb, right_path, &cpos);
2049 if (ret) {
2050 mlog_errno(ret);
2051 goto out;
2052 }
2053
2054 mlog(0, "Insert: %u, first left path cpos: %u\n", insert_cpos, cpos);
2055
2056 /*
2057 * What we want to do here is:
2058 *
2059 * 1) Start with the rightmost path.
2060 *
2061 * 2) Determine a path to the leaf block directly to the left
2062 * of that leaf.
2063 *
2064 * 3) Determine the 'subtree root' - the lowest level tree node
2065 * which contains a path to both leaves.
2066 *
2067 * 4) Rotate the subtree.
2068 *
2069 * 5) Find the next subtree by considering the left path to be
2070 * the new right path.
2071 *
2072 * The check at the top of this while loop also accepts
2073 * insert_cpos == cpos because cpos is only a _theoretical_
2074 * value to get us the left path - insert_cpos might very well
2075 * be filling that hole.
2076 *
2077 * Stop at a cpos of '0' because we either started at the
2078 * leftmost branch (i.e., a tree with one branch and a
2079 * rotation inside of it), or we've gone as far as we can in
2080 * rotating subtrees.
2081 */
2082 while (cpos && insert_cpos <= cpos) {
2083 mlog(0, "Rotating a tree: ins. cpos: %u, left path cpos: %u\n",
2084 insert_cpos, cpos);
2085
2086 ret = ocfs2_find_path(inode, left_path, cpos);
2087 if (ret) {
2088 mlog_errno(ret);
2089 goto out;
2090 }
2091
2092 mlog_bug_on_msg(path_leaf_bh(left_path) ==
2093 path_leaf_bh(right_path),
2094 "Inode %lu: error during insert of %u "
2095 "(left path cpos %u) results in two identical "
2096 "paths ending at %llu\n",
2097 inode->i_ino, insert_cpos, cpos,
2098 (unsigned long long)
2099 path_leaf_bh(left_path)->b_blocknr);
2100
Mark Fasheh328d5752007-06-18 10:48:04 -07002101 if (split == SPLIT_NONE &&
2102 ocfs2_rotate_requires_path_adjustment(left_path,
Mark Fashehdcd05382007-01-16 11:32:23 -08002103 insert_cpos)) {
Mark Fashehdcd05382007-01-16 11:32:23 -08002104
2105 /*
2106 * We've rotated the tree as much as we
2107 * should. The rest is up to
2108 * ocfs2_insert_path() to complete, after the
2109 * record insertion. We indicate this
2110 * situation by returning the left path.
2111 *
2112 * The reason we don't adjust the records here
2113 * before the record insert is that an error
2114 * later might break the rule where a parent
2115 * record e_cpos will reflect the actual
2116 * e_cpos of the 1st nonempty record of the
2117 * child list.
2118 */
2119 *ret_left_path = left_path;
2120 goto out_ret_path;
2121 }
2122
2123 start = ocfs2_find_subtree_root(inode, left_path, right_path);
2124
2125 mlog(0, "Subtree root at index %d (blk %llu, depth %d)\n",
2126 start,
2127 (unsigned long long) right_path->p_node[start].bh->b_blocknr,
2128 right_path->p_tree_depth);
2129
2130 ret = ocfs2_extend_rotate_transaction(handle, start,
Mark Fasheh328d5752007-06-18 10:48:04 -07002131 orig_credits, right_path);
Mark Fashehdcd05382007-01-16 11:32:23 -08002132 if (ret) {
2133 mlog_errno(ret);
2134 goto out;
2135 }
2136
2137 ret = ocfs2_rotate_subtree_right(inode, handle, left_path,
2138 right_path, start);
2139 if (ret) {
2140 mlog_errno(ret);
2141 goto out;
2142 }
2143
Mark Fasheh328d5752007-06-18 10:48:04 -07002144 if (split != SPLIT_NONE &&
2145 ocfs2_leftmost_rec_contains(path_leaf_el(right_path),
2146 insert_cpos)) {
2147 /*
2148 * A rotate moves the rightmost left leaf
2149 * record over to the leftmost right leaf
2150 * slot. If we're doing an extent split
2151 * instead of a real insert, then we have to
2152 * check that the extent to be split wasn't
2153 * just moved over. If it was, then we can
2154 * exit here, passing left_path back -
2155 * ocfs2_split_extent() is smart enough to
2156 * search both leaves.
2157 */
2158 *ret_left_path = left_path;
2159 goto out_ret_path;
2160 }
2161
Mark Fashehdcd05382007-01-16 11:32:23 -08002162 /*
2163 * There is no need to re-read the next right path
2164 * as we know that it'll be our current left
2165 * path. Optimize by copying values instead.
2166 */
2167 ocfs2_mv_path(right_path, left_path);
2168
2169 ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb, right_path,
2170 &cpos);
2171 if (ret) {
2172 mlog_errno(ret);
2173 goto out;
2174 }
2175 }
2176
2177out:
2178 ocfs2_free_path(left_path);
2179
2180out_ret_path:
2181 return ret;
2182}
2183
Mark Fasheh328d5752007-06-18 10:48:04 -07002184static void ocfs2_update_edge_lengths(struct inode *inode, handle_t *handle,
2185 struct ocfs2_path *path)
2186{
2187 int i, idx;
2188 struct ocfs2_extent_rec *rec;
2189 struct ocfs2_extent_list *el;
2190 struct ocfs2_extent_block *eb;
2191 u32 range;
2192
2193 /* Path should always be rightmost. */
2194 eb = (struct ocfs2_extent_block *)path_leaf_bh(path)->b_data;
2195 BUG_ON(eb->h_next_leaf_blk != 0ULL);
2196
2197 el = &eb->h_list;
2198 BUG_ON(le16_to_cpu(el->l_next_free_rec) == 0);
2199 idx = le16_to_cpu(el->l_next_free_rec) - 1;
2200 rec = &el->l_recs[idx];
2201 range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);
2202
2203 for (i = 0; i < path->p_tree_depth; i++) {
2204 el = path->p_node[i].el;
2205 idx = le16_to_cpu(el->l_next_free_rec) - 1;
2206 rec = &el->l_recs[idx];
2207
2208 rec->e_int_clusters = cpu_to_le32(range);
2209 le32_add_cpu(&rec->e_int_clusters, -le32_to_cpu(rec->e_cpos));
2210
2211 ocfs2_journal_dirty(handle, path->p_node[i].bh);
2212 }
2213}
2214
2215static void ocfs2_unlink_path(struct inode *inode, handle_t *handle,
2216 struct ocfs2_cached_dealloc_ctxt *dealloc,
2217 struct ocfs2_path *path, int unlink_start)
2218{
2219 int ret, i;
2220 struct ocfs2_extent_block *eb;
2221 struct ocfs2_extent_list *el;
2222 struct buffer_head *bh;
2223
2224 for(i = unlink_start; i < path_num_items(path); i++) {
2225 bh = path->p_node[i].bh;
2226
2227 eb = (struct ocfs2_extent_block *)bh->b_data;
2228 /*
2229 * Not all nodes might have had their final count
2230 * decremented by the caller - handle this here.
2231 */
2232 el = &eb->h_list;
2233 if (le16_to_cpu(el->l_next_free_rec) > 1) {
2234 mlog(ML_ERROR,
2235 "Inode %llu, attempted to remove extent block "
2236 "%llu with %u records\n",
2237 (unsigned long long)OCFS2_I(inode)->ip_blkno,
2238 (unsigned long long)le64_to_cpu(eb->h_blkno),
2239 le16_to_cpu(el->l_next_free_rec));
2240
2241 ocfs2_journal_dirty(handle, bh);
2242 ocfs2_remove_from_cache(inode, bh);
2243 continue;
2244 }
2245
2246 el->l_next_free_rec = 0;
2247 memset(&el->l_recs[0], 0, sizeof(struct ocfs2_extent_rec));
2248
2249 ocfs2_journal_dirty(handle, bh);
2250
2251 ret = ocfs2_cache_extent_block_free(dealloc, eb);
2252 if (ret)
2253 mlog_errno(ret);
2254
2255 ocfs2_remove_from_cache(inode, bh);
2256 }
2257}
2258
2259static void ocfs2_unlink_subtree(struct inode *inode, handle_t *handle,
2260 struct ocfs2_path *left_path,
2261 struct ocfs2_path *right_path,
2262 int subtree_index,
2263 struct ocfs2_cached_dealloc_ctxt *dealloc)
2264{
2265 int i;
2266 struct buffer_head *root_bh = left_path->p_node[subtree_index].bh;
2267 struct ocfs2_extent_list *root_el = left_path->p_node[subtree_index].el;
2268 struct ocfs2_extent_list *el;
2269 struct ocfs2_extent_block *eb;
2270
2271 el = path_leaf_el(left_path);
2272
2273 eb = (struct ocfs2_extent_block *)right_path->p_node[subtree_index + 1].bh->b_data;
2274
2275 for(i = 1; i < le16_to_cpu(root_el->l_next_free_rec); i++)
2276 if (root_el->l_recs[i].e_blkno == eb->h_blkno)
2277 break;
2278
2279 BUG_ON(i >= le16_to_cpu(root_el->l_next_free_rec));
2280
2281 memset(&root_el->l_recs[i], 0, sizeof(struct ocfs2_extent_rec));
2282 le16_add_cpu(&root_el->l_next_free_rec, -1);
2283
2284 eb = (struct ocfs2_extent_block *)path_leaf_bh(left_path)->b_data;
2285 eb->h_next_leaf_blk = 0;
2286
2287 ocfs2_journal_dirty(handle, root_bh);
2288 ocfs2_journal_dirty(handle, path_leaf_bh(left_path));
2289
2290 ocfs2_unlink_path(inode, handle, dealloc, right_path,
2291 subtree_index + 1);
2292}
2293
2294static int ocfs2_rotate_subtree_left(struct inode *inode, handle_t *handle,
2295 struct ocfs2_path *left_path,
2296 struct ocfs2_path *right_path,
2297 int subtree_index,
2298 struct ocfs2_cached_dealloc_ctxt *dealloc,
Tao Mae7d4cb62008-08-18 17:38:44 +08002299 int *deleted,
2300 struct ocfs2_extent_tree *et)
Mark Fasheh328d5752007-06-18 10:48:04 -07002301{
2302 int ret, i, del_right_subtree = 0, right_has_empty = 0;
Tao Mae7d4cb62008-08-18 17:38:44 +08002303 struct buffer_head *root_bh, *et_root_bh = path_root_bh(right_path);
Mark Fasheh328d5752007-06-18 10:48:04 -07002304 struct ocfs2_extent_list *right_leaf_el, *left_leaf_el;
2305 struct ocfs2_extent_block *eb;
2306
2307 *deleted = 0;
2308
2309 right_leaf_el = path_leaf_el(right_path);
2310 left_leaf_el = path_leaf_el(left_path);
2311 root_bh = left_path->p_node[subtree_index].bh;
2312 BUG_ON(root_bh != right_path->p_node[subtree_index].bh);
2313
2314 if (!ocfs2_is_empty_extent(&left_leaf_el->l_recs[0]))
2315 return 0;
2316
2317 eb = (struct ocfs2_extent_block *)path_leaf_bh(right_path)->b_data;
2318 if (ocfs2_is_empty_extent(&right_leaf_el->l_recs[0])) {
2319 /*
2320 * It's legal for us to proceed if the right leaf is
2321 * the rightmost one and it has an empty extent. There
2322 * are two cases to handle - whether the leaf will be
2323 * empty after removal or not. If the leaf isn't empty
2324 * then just remove the empty extent up front. The
2325 * next block will handle empty leaves by flagging
2326 * them for unlink.
2327 *
2328 * Non-rightmost leaves will throw -EAGAIN and the
2329 * caller can manually move the subtree and retry.
2330 */
2331
2332 if (eb->h_next_leaf_blk != 0ULL)
2333 return -EAGAIN;
2334
2335 if (le16_to_cpu(right_leaf_el->l_next_free_rec) > 1) {
2336 ret = ocfs2_journal_access(handle, inode,
2337 path_leaf_bh(right_path),
2338 OCFS2_JOURNAL_ACCESS_WRITE);
2339 if (ret) {
2340 mlog_errno(ret);
2341 goto out;
2342 }
2343
2344 ocfs2_remove_empty_extent(right_leaf_el);
2345 } else
2346 right_has_empty = 1;
2347 }
2348
2349 if (eb->h_next_leaf_blk == 0ULL &&
2350 le16_to_cpu(right_leaf_el->l_next_free_rec) == 1) {
2351 /*
2352 * We have to update i_last_eb_blk during the meta
2353 * data delete.
2354 */
Tao Mae7d4cb62008-08-18 17:38:44 +08002355 ret = ocfs2_journal_access(handle, inode, et_root_bh,
Mark Fasheh328d5752007-06-18 10:48:04 -07002356 OCFS2_JOURNAL_ACCESS_WRITE);
2357 if (ret) {
2358 mlog_errno(ret);
2359 goto out;
2360 }
2361
2362 del_right_subtree = 1;
2363 }
2364
2365 /*
2366 * Getting here with an empty extent in the right path implies
2367 * that it's the rightmost path and will be deleted.
2368 */
2369 BUG_ON(right_has_empty && !del_right_subtree);
2370
2371 ret = ocfs2_journal_access(handle, inode, root_bh,
2372 OCFS2_JOURNAL_ACCESS_WRITE);
2373 if (ret) {
2374 mlog_errno(ret);
2375 goto out;
2376 }
2377
2378 for(i = subtree_index + 1; i < path_num_items(right_path); i++) {
2379 ret = ocfs2_journal_access(handle, inode,
2380 right_path->p_node[i].bh,
2381 OCFS2_JOURNAL_ACCESS_WRITE);
2382 if (ret) {
2383 mlog_errno(ret);
2384 goto out;
2385 }
2386
2387 ret = ocfs2_journal_access(handle, inode,
2388 left_path->p_node[i].bh,
2389 OCFS2_JOURNAL_ACCESS_WRITE);
2390 if (ret) {
2391 mlog_errno(ret);
2392 goto out;
2393 }
2394 }
2395
2396 if (!right_has_empty) {
2397 /*
2398 * Only do this if we're moving a real
2399 * record. Otherwise, the action is delayed until
2400 * after removal of the right path in which case we
2401 * can do a simple shift to remove the empty extent.
2402 */
2403 ocfs2_rotate_leaf(left_leaf_el, &right_leaf_el->l_recs[0]);
2404 memset(&right_leaf_el->l_recs[0], 0,
2405 sizeof(struct ocfs2_extent_rec));
2406 }
2407 if (eb->h_next_leaf_blk == 0ULL) {
2408 /*
2409 * Move recs over to get rid of empty extent, decrease
2410 * next_free. This is allowed to remove the last
2411 * extent in our leaf (setting l_next_free_rec to
2412 * zero) - the delete code below won't care.
2413 */
2414 ocfs2_remove_empty_extent(right_leaf_el);
2415 }
2416
2417 ret = ocfs2_journal_dirty(handle, path_leaf_bh(left_path));
2418 if (ret)
2419 mlog_errno(ret);
2420 ret = ocfs2_journal_dirty(handle, path_leaf_bh(right_path));
2421 if (ret)
2422 mlog_errno(ret);
2423
2424 if (del_right_subtree) {
2425 ocfs2_unlink_subtree(inode, handle, left_path, right_path,
2426 subtree_index, dealloc);
2427 ocfs2_update_edge_lengths(inode, handle, left_path);
2428
2429 eb = (struct ocfs2_extent_block *)path_leaf_bh(left_path)->b_data;
Joel Becker35dc0aa2008-08-20 16:25:06 -07002430 ocfs2_et_set_last_eb_blk(et, le64_to_cpu(eb->h_blkno));
Mark Fasheh328d5752007-06-18 10:48:04 -07002431
2432 /*
2433 * Removal of the extent in the left leaf was skipped
2434 * above so we could delete the right path
2435 * 1st.
2436 */
2437 if (right_has_empty)
2438 ocfs2_remove_empty_extent(left_leaf_el);
2439
Tao Mae7d4cb62008-08-18 17:38:44 +08002440 ret = ocfs2_journal_dirty(handle, et_root_bh);
Mark Fasheh328d5752007-06-18 10:48:04 -07002441 if (ret)
2442 mlog_errno(ret);
2443
2444 *deleted = 1;
2445 } else
2446 ocfs2_complete_edge_insert(inode, handle, left_path, right_path,
2447 subtree_index);
2448
2449out:
2450 return ret;
2451}
2452
2453/*
2454 * Given a full path, determine what cpos value would return us a path
2455 * containing the leaf immediately to the right of the current one.
2456 *
2457 * Will return zero if the path passed in is already the rightmost path.
2458 *
2459 * This looks similar, but is subtly different from
2460 * ocfs2_find_cpos_for_left_leaf().
2461 */
2462static int ocfs2_find_cpos_for_right_leaf(struct super_block *sb,
2463 struct ocfs2_path *path, u32 *cpos)
2464{
2465 int i, j, ret = 0;
2466 u64 blkno;
2467 struct ocfs2_extent_list *el;
2468
2469 *cpos = 0;
2470
2471 if (path->p_tree_depth == 0)
2472 return 0;
2473
2474 blkno = path_leaf_bh(path)->b_blocknr;
2475
2476 /* Start at the tree node just above the leaf and work our way up. */
2477 i = path->p_tree_depth - 1;
2478 while (i >= 0) {
2479 int next_free;
2480
2481 el = path->p_node[i].el;
2482
2483 /*
2484 * Find the extent record just after the one in our
2485 * path.
2486 */
2487 next_free = le16_to_cpu(el->l_next_free_rec);
2488 for(j = 0; j < le16_to_cpu(el->l_next_free_rec); j++) {
2489 if (le64_to_cpu(el->l_recs[j].e_blkno) == blkno) {
2490 if (j == (next_free - 1)) {
2491 if (i == 0) {
2492 /*
2493 * We've determined that the
2494 * path specified is already
2495 * the rightmost one - return a
2496 * cpos of zero.
2497 */
2498 goto out;
2499 }
2500 /*
2501 * The rightmost record points to our
2502 * leaf - we need to travel up the
2503 * tree one level.
2504 */
2505 goto next_node;
2506 }
2507
2508 *cpos = le32_to_cpu(el->l_recs[j + 1].e_cpos);
2509 goto out;
2510 }
2511 }
2512
2513 /*
2514 * If we got here, we never found a valid node where
2515 * the tree indicated one should be.
2516 */
2517 ocfs2_error(sb,
2518 "Invalid extent tree at extent block %llu\n",
2519 (unsigned long long)blkno);
2520 ret = -EROFS;
2521 goto out;
2522
2523next_node:
2524 blkno = path->p_node[i].bh->b_blocknr;
2525 i--;
2526 }
2527
2528out:
2529 return ret;
2530}
2531
2532static int ocfs2_rotate_rightmost_leaf_left(struct inode *inode,
2533 handle_t *handle,
2534 struct buffer_head *bh,
2535 struct ocfs2_extent_list *el)
2536{
2537 int ret;
2538
2539 if (!ocfs2_is_empty_extent(&el->l_recs[0]))
2540 return 0;
2541
2542 ret = ocfs2_journal_access(handle, inode, bh,
2543 OCFS2_JOURNAL_ACCESS_WRITE);
2544 if (ret) {
2545 mlog_errno(ret);
2546 goto out;
2547 }
2548
2549 ocfs2_remove_empty_extent(el);
2550
2551 ret = ocfs2_journal_dirty(handle, bh);
2552 if (ret)
2553 mlog_errno(ret);
2554
2555out:
2556 return ret;
2557}
2558
2559static int __ocfs2_rotate_tree_left(struct inode *inode,
2560 handle_t *handle, int orig_credits,
2561 struct ocfs2_path *path,
2562 struct ocfs2_cached_dealloc_ctxt *dealloc,
Tao Mae7d4cb62008-08-18 17:38:44 +08002563 struct ocfs2_path **empty_extent_path,
2564 struct ocfs2_extent_tree *et)
Mark Fasheh328d5752007-06-18 10:48:04 -07002565{
2566 int ret, subtree_root, deleted;
2567 u32 right_cpos;
2568 struct ocfs2_path *left_path = NULL;
2569 struct ocfs2_path *right_path = NULL;
2570
2571 BUG_ON(!ocfs2_is_empty_extent(&(path_leaf_el(path)->l_recs[0])));
2572
2573 *empty_extent_path = NULL;
2574
2575 ret = ocfs2_find_cpos_for_right_leaf(inode->i_sb, path,
2576 &right_cpos);
2577 if (ret) {
2578 mlog_errno(ret);
2579 goto out;
2580 }
2581
2582 left_path = ocfs2_new_path(path_root_bh(path),
2583 path_root_el(path));
2584 if (!left_path) {
2585 ret = -ENOMEM;
2586 mlog_errno(ret);
2587 goto out;
2588 }
2589
2590 ocfs2_cp_path(left_path, path);
2591
2592 right_path = ocfs2_new_path(path_root_bh(path),
2593 path_root_el(path));
2594 if (!right_path) {
2595 ret = -ENOMEM;
2596 mlog_errno(ret);
2597 goto out;
2598 }
2599
2600 while (right_cpos) {
2601 ret = ocfs2_find_path(inode, right_path, right_cpos);
2602 if (ret) {
2603 mlog_errno(ret);
2604 goto out;
2605 }
2606
2607 subtree_root = ocfs2_find_subtree_root(inode, left_path,
2608 right_path);
2609
2610 mlog(0, "Subtree root at index %d (blk %llu, depth %d)\n",
2611 subtree_root,
2612 (unsigned long long)
2613 right_path->p_node[subtree_root].bh->b_blocknr,
2614 right_path->p_tree_depth);
2615
2616 ret = ocfs2_extend_rotate_transaction(handle, subtree_root,
2617 orig_credits, left_path);
2618 if (ret) {
2619 mlog_errno(ret);
2620 goto out;
2621 }
2622
Mark Fashehe8aed342007-12-03 16:43:01 -08002623 /*
2624 * Caller might still want to make changes to the
2625 * tree root, so re-add it to the journal here.
2626 */
2627 ret = ocfs2_journal_access(handle, inode,
2628 path_root_bh(left_path),
2629 OCFS2_JOURNAL_ACCESS_WRITE);
2630 if (ret) {
2631 mlog_errno(ret);
2632 goto out;
2633 }
2634
Mark Fasheh328d5752007-06-18 10:48:04 -07002635 ret = ocfs2_rotate_subtree_left(inode, handle, left_path,
2636 right_path, subtree_root,
Tao Mae7d4cb62008-08-18 17:38:44 +08002637 dealloc, &deleted, et);
Mark Fasheh328d5752007-06-18 10:48:04 -07002638 if (ret == -EAGAIN) {
2639 /*
2640 * The rotation has to temporarily stop due to
2641 * the right subtree having an empty
2642 * extent. Pass it back to the caller for a
2643 * fixup.
2644 */
2645 *empty_extent_path = right_path;
2646 right_path = NULL;
2647 goto out;
2648 }
2649 if (ret) {
2650 mlog_errno(ret);
2651 goto out;
2652 }
2653
2654 /*
2655 * The subtree rotate might have removed records on
2656 * the rightmost edge. If so, then rotation is
2657 * complete.
2658 */
2659 if (deleted)
2660 break;
2661
2662 ocfs2_mv_path(left_path, right_path);
2663
2664 ret = ocfs2_find_cpos_for_right_leaf(inode->i_sb, left_path,
2665 &right_cpos);
2666 if (ret) {
2667 mlog_errno(ret);
2668 goto out;
2669 }
2670 }
2671
2672out:
2673 ocfs2_free_path(right_path);
2674 ocfs2_free_path(left_path);
2675
2676 return ret;
2677}
2678
2679static int ocfs2_remove_rightmost_path(struct inode *inode, handle_t *handle,
Tao Mae7d4cb62008-08-18 17:38:44 +08002680 struct ocfs2_path *path,
2681 struct ocfs2_cached_dealloc_ctxt *dealloc,
2682 struct ocfs2_extent_tree *et)
Mark Fasheh328d5752007-06-18 10:48:04 -07002683{
2684 int ret, subtree_index;
2685 u32 cpos;
2686 struct ocfs2_path *left_path = NULL;
Mark Fasheh328d5752007-06-18 10:48:04 -07002687 struct ocfs2_extent_block *eb;
2688 struct ocfs2_extent_list *el;
2689
Mark Fasheh328d5752007-06-18 10:48:04 -07002690
Joel Becker35dc0aa2008-08-20 16:25:06 -07002691 ret = ocfs2_et_sanity_check(inode, et);
Tao Mae7d4cb62008-08-18 17:38:44 +08002692 if (ret)
2693 goto out;
Mark Fasheh328d5752007-06-18 10:48:04 -07002694 /*
2695 * There are two ways to handle this, depending on
2696 * whether path is the only existing one.
2697 */
2698 ret = ocfs2_extend_rotate_transaction(handle, 0,
2699 handle->h_buffer_credits,
2700 path);
2701 if (ret) {
2702 mlog_errno(ret);
2703 goto out;
2704 }
2705
2706 ret = ocfs2_journal_access_path(inode, handle, path);
2707 if (ret) {
2708 mlog_errno(ret);
2709 goto out;
2710 }
2711
2712 ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb, path, &cpos);
2713 if (ret) {
2714 mlog_errno(ret);
2715 goto out;
2716 }
2717
2718 if (cpos) {
2719 /*
2720 * We have a path to the left of this one - it needs
2721 * an update too.
2722 */
2723 left_path = ocfs2_new_path(path_root_bh(path),
2724 path_root_el(path));
2725 if (!left_path) {
2726 ret = -ENOMEM;
2727 mlog_errno(ret);
2728 goto out;
2729 }
2730
2731 ret = ocfs2_find_path(inode, left_path, cpos);
2732 if (ret) {
2733 mlog_errno(ret);
2734 goto out;
2735 }
2736
2737 ret = ocfs2_journal_access_path(inode, handle, left_path);
2738 if (ret) {
2739 mlog_errno(ret);
2740 goto out;
2741 }
2742
2743 subtree_index = ocfs2_find_subtree_root(inode, left_path, path);
2744
2745 ocfs2_unlink_subtree(inode, handle, left_path, path,
2746 subtree_index, dealloc);
2747 ocfs2_update_edge_lengths(inode, handle, left_path);
2748
2749 eb = (struct ocfs2_extent_block *)path_leaf_bh(left_path)->b_data;
Joel Becker35dc0aa2008-08-20 16:25:06 -07002750 ocfs2_et_set_last_eb_blk(et, le64_to_cpu(eb->h_blkno));
Mark Fasheh328d5752007-06-18 10:48:04 -07002751 } else {
2752 /*
2753 * 'path' is also the leftmost path which
2754 * means it must be the only one. This gets
2755 * handled differently because we want to
2756 * revert the inode back to having extents
2757 * in-line.
2758 */
2759 ocfs2_unlink_path(inode, handle, dealloc, path, 1);
2760
Joel Beckerce1d9ea2008-08-20 16:30:07 -07002761 el = et->et_root_el;
Mark Fasheh328d5752007-06-18 10:48:04 -07002762 el->l_tree_depth = 0;
2763 el->l_next_free_rec = 0;
2764 memset(&el->l_recs[0], 0, sizeof(struct ocfs2_extent_rec));
2765
Joel Becker35dc0aa2008-08-20 16:25:06 -07002766 ocfs2_et_set_last_eb_blk(et, 0);
Mark Fasheh328d5752007-06-18 10:48:04 -07002767 }
2768
2769 ocfs2_journal_dirty(handle, path_root_bh(path));
2770
2771out:
2772 ocfs2_free_path(left_path);
2773 return ret;
2774}
2775
2776/*
2777 * Left rotation of btree records.
2778 *
2779 * In many ways, this is (unsurprisingly) the opposite of right
2780 * rotation. We start at some non-rightmost path containing an empty
2781 * extent in the leaf block. The code works its way to the rightmost
2782 * path by rotating records to the left in every subtree.
2783 *
2784 * This is used by any code which reduces the number of extent records
2785 * in a leaf. After removal, an empty record should be placed in the
2786 * leftmost list position.
2787 *
2788 * This won't handle a length update of the rightmost path records if
2789 * the rightmost tree leaf record is removed so the caller is
2790 * responsible for detecting and correcting that.
2791 */
2792static int ocfs2_rotate_tree_left(struct inode *inode, handle_t *handle,
2793 struct ocfs2_path *path,
Tao Mae7d4cb62008-08-18 17:38:44 +08002794 struct ocfs2_cached_dealloc_ctxt *dealloc,
2795 struct ocfs2_extent_tree *et)
Mark Fasheh328d5752007-06-18 10:48:04 -07002796{
2797 int ret, orig_credits = handle->h_buffer_credits;
2798 struct ocfs2_path *tmp_path = NULL, *restart_path = NULL;
2799 struct ocfs2_extent_block *eb;
2800 struct ocfs2_extent_list *el;
2801
2802 el = path_leaf_el(path);
2803 if (!ocfs2_is_empty_extent(&el->l_recs[0]))
2804 return 0;
2805
2806 if (path->p_tree_depth == 0) {
2807rightmost_no_delete:
2808 /*
Tao Mae7d4cb62008-08-18 17:38:44 +08002809 * Inline extents. This is trivially handled, so do
Mark Fasheh328d5752007-06-18 10:48:04 -07002810 * it up front.
2811 */
2812 ret = ocfs2_rotate_rightmost_leaf_left(inode, handle,
2813 path_leaf_bh(path),
2814 path_leaf_el(path));
2815 if (ret)
2816 mlog_errno(ret);
2817 goto out;
2818 }
2819
2820 /*
2821 * Handle the rightmost branch now. There are several cases:
2822 * 1) simple rotation leaving records in there. That's trivial.
2823 * 2) rotation requiring a branch delete - there's no more
2824 * records left. Two cases of this:
2825 * a) There are branches to the left.
2826 * b) This is also the leftmost (the only) branch.
2827 *
2828 * 1) is handled via ocfs2_rotate_rightmost_leaf_left()
2829 * 2a) we need the left branch so that we can update it with the unlink
2830 * 2b) we need to bring the inode back to inline extents.
2831 */
2832
2833 eb = (struct ocfs2_extent_block *)path_leaf_bh(path)->b_data;
2834 el = &eb->h_list;
2835 if (eb->h_next_leaf_blk == 0) {
2836 /*
2837 * This gets a bit tricky if we're going to delete the
2838 * rightmost path. Get the other cases out of the way
2839 * 1st.
2840 */
2841 if (le16_to_cpu(el->l_next_free_rec) > 1)
2842 goto rightmost_no_delete;
2843
2844 if (le16_to_cpu(el->l_next_free_rec) == 0) {
2845 ret = -EIO;
2846 ocfs2_error(inode->i_sb,
2847 "Inode %llu has empty extent block at %llu",
2848 (unsigned long long)OCFS2_I(inode)->ip_blkno,
2849 (unsigned long long)le64_to_cpu(eb->h_blkno));
2850 goto out;
2851 }
2852
2853 /*
2854 * XXX: The caller cannot trust "path" any more after
2855 * this as it will have been deleted. What do we do?
2856 *
2857 * In theory the rotate-for-merge code will never get
2858 * here because it'll always ask for a rotate in a
2859 * nonempty list.
2860 */
2861
2862 ret = ocfs2_remove_rightmost_path(inode, handle, path,
Tao Mae7d4cb62008-08-18 17:38:44 +08002863 dealloc, et);
Mark Fasheh328d5752007-06-18 10:48:04 -07002864 if (ret)
2865 mlog_errno(ret);
2866 goto out;
2867 }
2868
2869 /*
2870 * Now we can loop, remembering the path we get from -EAGAIN
2871 * and restarting from there.
2872 */
2873try_rotate:
2874 ret = __ocfs2_rotate_tree_left(inode, handle, orig_credits, path,
Tao Mae7d4cb62008-08-18 17:38:44 +08002875 dealloc, &restart_path, et);
Mark Fasheh328d5752007-06-18 10:48:04 -07002876 if (ret && ret != -EAGAIN) {
2877 mlog_errno(ret);
2878 goto out;
2879 }
2880
2881 while (ret == -EAGAIN) {
2882 tmp_path = restart_path;
2883 restart_path = NULL;
2884
2885 ret = __ocfs2_rotate_tree_left(inode, handle, orig_credits,
2886 tmp_path, dealloc,
Tao Mae7d4cb62008-08-18 17:38:44 +08002887 &restart_path, et);
Mark Fasheh328d5752007-06-18 10:48:04 -07002888 if (ret && ret != -EAGAIN) {
2889 mlog_errno(ret);
2890 goto out;
2891 }
2892
2893 ocfs2_free_path(tmp_path);
2894 tmp_path = NULL;
2895
2896 if (ret == 0)
2897 goto try_rotate;
2898 }
2899
2900out:
2901 ocfs2_free_path(tmp_path);
2902 ocfs2_free_path(restart_path);
2903 return ret;
2904}
2905
2906static void ocfs2_cleanup_merge(struct ocfs2_extent_list *el,
2907 int index)
2908{
2909 struct ocfs2_extent_rec *rec = &el->l_recs[index];
2910 unsigned int size;
2911
2912 if (rec->e_leaf_clusters == 0) {
2913 /*
2914 * We consumed all of the merged-from record. An empty
2915 * extent cannot exist anywhere but the 1st array
2916 * position, so move things over if the merged-from
2917 * record doesn't occupy that position.
2918 *
2919 * This creates a new empty extent so the caller
2920 * should be smart enough to have removed any existing
2921 * ones.
2922 */
2923 if (index > 0) {
2924 BUG_ON(ocfs2_is_empty_extent(&el->l_recs[0]));
2925 size = index * sizeof(struct ocfs2_extent_rec);
2926 memmove(&el->l_recs[1], &el->l_recs[0], size);
2927 }
2928
2929 /*
2930 * Always memset - the caller doesn't check whether it
2931 * created an empty extent, so there could be junk in
2932 * the other fields.
2933 */
2934 memset(&el->l_recs[0], 0, sizeof(struct ocfs2_extent_rec));
2935 }
2936}
2937
Tao Ma677b9752008-01-30 14:21:05 +08002938static int ocfs2_get_right_path(struct inode *inode,
2939 struct ocfs2_path *left_path,
2940 struct ocfs2_path **ret_right_path)
Mark Fasheh328d5752007-06-18 10:48:04 -07002941{
2942 int ret;
Tao Ma677b9752008-01-30 14:21:05 +08002943 u32 right_cpos;
2944 struct ocfs2_path *right_path = NULL;
2945 struct ocfs2_extent_list *left_el;
2946
2947 *ret_right_path = NULL;
2948
2949 /* This function shouldn't be called for non-trees. */
2950 BUG_ON(left_path->p_tree_depth == 0);
2951
2952 left_el = path_leaf_el(left_path);
2953 BUG_ON(left_el->l_next_free_rec != left_el->l_count);
2954
2955 ret = ocfs2_find_cpos_for_right_leaf(inode->i_sb, left_path,
2956 &right_cpos);
2957 if (ret) {
2958 mlog_errno(ret);
2959 goto out;
2960 }
2961
2962 /* This function shouldn't be called for the rightmost leaf. */
2963 BUG_ON(right_cpos == 0);
2964
2965 right_path = ocfs2_new_path(path_root_bh(left_path),
2966 path_root_el(left_path));
2967 if (!right_path) {
2968 ret = -ENOMEM;
2969 mlog_errno(ret);
2970 goto out;
2971 }
2972
2973 ret = ocfs2_find_path(inode, right_path, right_cpos);
2974 if (ret) {
2975 mlog_errno(ret);
2976 goto out;
2977 }
2978
2979 *ret_right_path = right_path;
2980out:
2981 if (ret)
2982 ocfs2_free_path(right_path);
2983 return ret;
2984}
2985
2986/*
2987 * Remove split_rec clusters from the record at index and merge them
2988 * onto the beginning of the record "next" to it.
2989 * For index < l_count - 1, the next means the extent rec at index + 1.
2990 * For index == l_count - 1, the "next" means the 1st extent rec of the
2991 * next extent block.
2992 */
2993static int ocfs2_merge_rec_right(struct inode *inode,
2994 struct ocfs2_path *left_path,
2995 handle_t *handle,
2996 struct ocfs2_extent_rec *split_rec,
2997 int index)
2998{
2999 int ret, next_free, i;
Mark Fasheh328d5752007-06-18 10:48:04 -07003000 unsigned int split_clusters = le16_to_cpu(split_rec->e_leaf_clusters);
3001 struct ocfs2_extent_rec *left_rec;
3002 struct ocfs2_extent_rec *right_rec;
Tao Ma677b9752008-01-30 14:21:05 +08003003 struct ocfs2_extent_list *right_el;
3004 struct ocfs2_path *right_path = NULL;
3005 int subtree_index = 0;
3006 struct ocfs2_extent_list *el = path_leaf_el(left_path);
3007 struct buffer_head *bh = path_leaf_bh(left_path);
3008 struct buffer_head *root_bh = NULL;
Mark Fasheh328d5752007-06-18 10:48:04 -07003009
3010 BUG_ON(index >= le16_to_cpu(el->l_next_free_rec));
Mark Fasheh328d5752007-06-18 10:48:04 -07003011 left_rec = &el->l_recs[index];
Tao Ma677b9752008-01-30 14:21:05 +08003012
Al Viro9d8df6a2008-05-21 06:32:11 +01003013 if (index == le16_to_cpu(el->l_next_free_rec) - 1 &&
Tao Ma677b9752008-01-30 14:21:05 +08003014 le16_to_cpu(el->l_next_free_rec) == le16_to_cpu(el->l_count)) {
3015 /* This is a cross extent block merge. */
3016 ret = ocfs2_get_right_path(inode, left_path, &right_path);
3017 if (ret) {
3018 mlog_errno(ret);
3019 goto out;
3020 }
3021
3022 right_el = path_leaf_el(right_path);
3023 next_free = le16_to_cpu(right_el->l_next_free_rec);
3024 BUG_ON(next_free <= 0);
3025 right_rec = &right_el->l_recs[0];
3026 if (ocfs2_is_empty_extent(right_rec)) {
Al Viro9d8df6a2008-05-21 06:32:11 +01003027 BUG_ON(next_free <= 1);
Tao Ma677b9752008-01-30 14:21:05 +08003028 right_rec = &right_el->l_recs[1];
3029 }
3030
3031 BUG_ON(le32_to_cpu(left_rec->e_cpos) +
3032 le16_to_cpu(left_rec->e_leaf_clusters) !=
3033 le32_to_cpu(right_rec->e_cpos));
3034
3035 subtree_index = ocfs2_find_subtree_root(inode,
3036 left_path, right_path);
3037
3038 ret = ocfs2_extend_rotate_transaction(handle, subtree_index,
3039 handle->h_buffer_credits,
3040 right_path);
3041 if (ret) {
3042 mlog_errno(ret);
3043 goto out;
3044 }
3045
3046 root_bh = left_path->p_node[subtree_index].bh;
3047 BUG_ON(root_bh != right_path->p_node[subtree_index].bh);
3048
3049 ret = ocfs2_journal_access(handle, inode, root_bh,
3050 OCFS2_JOURNAL_ACCESS_WRITE);
3051 if (ret) {
3052 mlog_errno(ret);
3053 goto out;
3054 }
3055
3056 for (i = subtree_index + 1;
3057 i < path_num_items(right_path); i++) {
3058 ret = ocfs2_journal_access(handle, inode,
3059 right_path->p_node[i].bh,
3060 OCFS2_JOURNAL_ACCESS_WRITE);
3061 if (ret) {
3062 mlog_errno(ret);
3063 goto out;
3064 }
3065
3066 ret = ocfs2_journal_access(handle, inode,
3067 left_path->p_node[i].bh,
3068 OCFS2_JOURNAL_ACCESS_WRITE);
3069 if (ret) {
3070 mlog_errno(ret);
3071 goto out;
3072 }
3073 }
3074
3075 } else {
3076 BUG_ON(index == le16_to_cpu(el->l_next_free_rec) - 1);
3077 right_rec = &el->l_recs[index + 1];
3078 }
Mark Fasheh328d5752007-06-18 10:48:04 -07003079
3080 ret = ocfs2_journal_access(handle, inode, bh,
3081 OCFS2_JOURNAL_ACCESS_WRITE);
3082 if (ret) {
3083 mlog_errno(ret);
3084 goto out;
3085 }
3086
3087 le16_add_cpu(&left_rec->e_leaf_clusters, -split_clusters);
3088
3089 le32_add_cpu(&right_rec->e_cpos, -split_clusters);
3090 le64_add_cpu(&right_rec->e_blkno,
3091 -ocfs2_clusters_to_blocks(inode->i_sb, split_clusters));
3092 le16_add_cpu(&right_rec->e_leaf_clusters, split_clusters);
3093
3094 ocfs2_cleanup_merge(el, index);
3095
3096 ret = ocfs2_journal_dirty(handle, bh);
3097 if (ret)
3098 mlog_errno(ret);
3099
Tao Ma677b9752008-01-30 14:21:05 +08003100 if (right_path) {
3101 ret = ocfs2_journal_dirty(handle, path_leaf_bh(right_path));
3102 if (ret)
3103 mlog_errno(ret);
3104
3105 ocfs2_complete_edge_insert(inode, handle, left_path,
3106 right_path, subtree_index);
3107 }
Mark Fasheh328d5752007-06-18 10:48:04 -07003108out:
Tao Ma677b9752008-01-30 14:21:05 +08003109 if (right_path)
3110 ocfs2_free_path(right_path);
3111 return ret;
3112}
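/*
 * A worked example with made-up numbers for the record arithmetic in
 * ocfs2_merge_rec_right().  Say the record at 'index' covers cpos 0
 * for 10 clusters, the record to its right starts at cpos 10 with 6
 * clusters, and split_rec names the last 4 clusters of the left
 * record.  After the merge:
 *
 *	left_rec->e_leaf_clusters  : 10 -> 6
 *	right_rec->e_cpos          : 10 -> 6
 *	right_rec->e_blkno         : moved back by 4 clusters of blocks
 *	right_rec->e_leaf_clusters : 6  -> 10
 *
 * The 4 clusters simply change owners; the total range covered by the
 * two records is unchanged.
 */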
3113
3114static int ocfs2_get_left_path(struct inode *inode,
3115 struct ocfs2_path *right_path,
3116 struct ocfs2_path **ret_left_path)
3117{
3118 int ret;
3119 u32 left_cpos;
3120 struct ocfs2_path *left_path = NULL;
3121
3122 *ret_left_path = NULL;
3123
3124 /* This function shouldn't be called for non-trees. */
3125 BUG_ON(right_path->p_tree_depth == 0);
3126
3127 ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb,
3128 right_path, &left_cpos);
3129 if (ret) {
3130 mlog_errno(ret);
3131 goto out;
3132 }
3133
3134 /* This function shouldn't be called for the leftmost leaf. */
3135 BUG_ON(left_cpos == 0);
3136
3137 left_path = ocfs2_new_path(path_root_bh(right_path),
3138 path_root_el(right_path));
3139 if (!left_path) {
3140 ret = -ENOMEM;
3141 mlog_errno(ret);
3142 goto out;
3143 }
3144
3145 ret = ocfs2_find_path(inode, left_path, left_cpos);
3146 if (ret) {
3147 mlog_errno(ret);
3148 goto out;
3149 }
3150
3151 *ret_left_path = left_path;
3152out:
3153 if (ret)
3154 ocfs2_free_path(left_path);
Mark Fasheh328d5752007-06-18 10:48:04 -07003155 return ret;
3156}
3157
3158/*
3159 * Remove split_rec clusters from the record at index and merge them
Tao Ma677b9752008-01-30 14:21:05 +08003160 * onto the tail of the record "before" it.
3161 * For index > 0, the "before" means the extent rec at index - 1.
3162 *
3163 * For index == 0, the "before" means the last record of the previous
3164 * extent block. There is also a situation where we may need to
3165 * remove the rightmost leaf extent block in right_path and update
3166 * the right path to point at the new rightmost path.
Mark Fasheh328d5752007-06-18 10:48:04 -07003167 */
Tao Ma677b9752008-01-30 14:21:05 +08003168static int ocfs2_merge_rec_left(struct inode *inode,
3169 struct ocfs2_path *right_path,
Mark Fasheh328d5752007-06-18 10:48:04 -07003170 handle_t *handle,
3171 struct ocfs2_extent_rec *split_rec,
Tao Ma677b9752008-01-30 14:21:05 +08003172 struct ocfs2_cached_dealloc_ctxt *dealloc,
Tao Mae7d4cb62008-08-18 17:38:44 +08003173 struct ocfs2_extent_tree *et,
Tao Ma677b9752008-01-30 14:21:05 +08003174 int index)
Mark Fasheh328d5752007-06-18 10:48:04 -07003175{
Tao Ma677b9752008-01-30 14:21:05 +08003176 int ret, i, subtree_index = 0, has_empty_extent = 0;
Mark Fasheh328d5752007-06-18 10:48:04 -07003177 unsigned int split_clusters = le16_to_cpu(split_rec->e_leaf_clusters);
3178 struct ocfs2_extent_rec *left_rec;
3179 struct ocfs2_extent_rec *right_rec;
Tao Ma677b9752008-01-30 14:21:05 +08003180 struct ocfs2_extent_list *el = path_leaf_el(right_path);
3181 struct buffer_head *bh = path_leaf_bh(right_path);
3182 struct buffer_head *root_bh = NULL;
3183 struct ocfs2_path *left_path = NULL;
3184 struct ocfs2_extent_list *left_el;
Mark Fasheh328d5752007-06-18 10:48:04 -07003185
Tao Ma677b9752008-01-30 14:21:05 +08003186 BUG_ON(index < 0);
Mark Fasheh328d5752007-06-18 10:48:04 -07003187
Mark Fasheh328d5752007-06-18 10:48:04 -07003188 right_rec = &el->l_recs[index];
Tao Ma677b9752008-01-30 14:21:05 +08003189 if (index == 0) {
3190 /* This is a cross extent block merge. */
3191 ret = ocfs2_get_left_path(inode, right_path, &left_path);
3192 if (ret) {
3193 mlog_errno(ret);
3194 goto out;
3195 }
3196
3197 left_el = path_leaf_el(left_path);
3198 BUG_ON(le16_to_cpu(left_el->l_next_free_rec) !=
3199 le16_to_cpu(left_el->l_count));
3200
3201 left_rec = &left_el->l_recs[
3202 le16_to_cpu(left_el->l_next_free_rec) - 1];
3203 BUG_ON(le32_to_cpu(left_rec->e_cpos) +
3204 le16_to_cpu(left_rec->e_leaf_clusters) !=
3205 le32_to_cpu(split_rec->e_cpos));
3206
3207 subtree_index = ocfs2_find_subtree_root(inode,
3208 left_path, right_path);
3209
3210 ret = ocfs2_extend_rotate_transaction(handle, subtree_index,
3211 handle->h_buffer_credits,
3212 left_path);
3213 if (ret) {
3214 mlog_errno(ret);
3215 goto out;
3216 }
3217
3218 root_bh = left_path->p_node[subtree_index].bh;
3219 BUG_ON(root_bh != right_path->p_node[subtree_index].bh);
3220
3221 ret = ocfs2_journal_access(handle, inode, root_bh,
3222 OCFS2_JOURNAL_ACCESS_WRITE);
3223 if (ret) {
3224 mlog_errno(ret);
3225 goto out;
3226 }
3227
3228 for (i = subtree_index + 1;
3229 i < path_num_items(right_path); i++) {
3230 ret = ocfs2_journal_access(handle, inode,
3231 right_path->p_node[i].bh,
3232 OCFS2_JOURNAL_ACCESS_WRITE);
3233 if (ret) {
3234 mlog_errno(ret);
3235 goto out;
3236 }
3237
3238 ret = ocfs2_journal_access(handle, inode,
3239 left_path->p_node[i].bh,
3240 OCFS2_JOURNAL_ACCESS_WRITE);
3241 if (ret) {
3242 mlog_errno(ret);
3243 goto out;
3244 }
3245 }
3246 } else {
3247 left_rec = &el->l_recs[index - 1];
3248 if (ocfs2_is_empty_extent(&el->l_recs[0]))
3249 has_empty_extent = 1;
3250 }
Mark Fasheh328d5752007-06-18 10:48:04 -07003251
3252 ret = ocfs2_journal_access(handle, inode, bh,
3253 OCFS2_JOURNAL_ACCESS_WRITE);
3254 if (ret) {
3255 mlog_errno(ret);
3256 goto out;
3257 }
3258
3259 if (has_empty_extent && index == 1) {
3260 /*
3261 * The easy case - we can just plop the record right in.
3262 */
3263 *left_rec = *split_rec;
3264
3265 has_empty_extent = 0;
Tao Ma677b9752008-01-30 14:21:05 +08003266 } else
Mark Fasheh328d5752007-06-18 10:48:04 -07003267 le16_add_cpu(&left_rec->e_leaf_clusters, split_clusters);
Mark Fasheh328d5752007-06-18 10:48:04 -07003268
3269 le32_add_cpu(&right_rec->e_cpos, split_clusters);
3270 le64_add_cpu(&right_rec->e_blkno,
3271 ocfs2_clusters_to_blocks(inode->i_sb, split_clusters));
3272 le16_add_cpu(&right_rec->e_leaf_clusters, -split_clusters);
3273
3274 ocfs2_cleanup_merge(el, index);
3275
3276 ret = ocfs2_journal_dirty(handle, bh);
3277 if (ret)
3278 mlog_errno(ret);
3279
Tao Ma677b9752008-01-30 14:21:05 +08003280 if (left_path) {
3281 ret = ocfs2_journal_dirty(handle, path_leaf_bh(left_path));
3282 if (ret)
3283 mlog_errno(ret);
3284
3285 /*
3286 * In the situation that the right_rec is empty and the extent
3287 * block is empty also, ocfs2_complete_edge_insert can't handle
3288 * it and we need to delete the right extent block.
3289 */
3290 if (le16_to_cpu(right_rec->e_leaf_clusters) == 0 &&
3291 le16_to_cpu(el->l_next_free_rec) == 1) {
3292
3293 ret = ocfs2_remove_rightmost_path(inode, handle,
Tao Mae7d4cb62008-08-18 17:38:44 +08003294 right_path,
3295 dealloc, et);
Tao Ma677b9752008-01-30 14:21:05 +08003296 if (ret) {
3297 mlog_errno(ret);
3298 goto out;
3299 }
3300
3301 /* Now the rightmost extent block has been deleted.
3302 * So we use the new rightmost path.
3303 */
3304 ocfs2_mv_path(right_path, left_path);
3305 left_path = NULL;
3306 } else
3307 ocfs2_complete_edge_insert(inode, handle, left_path,
3308 right_path, subtree_index);
3309 }
Mark Fasheh328d5752007-06-18 10:48:04 -07003310out:
Tao Ma677b9752008-01-30 14:21:05 +08003311 if (left_path)
3312 ocfs2_free_path(left_path);
Mark Fasheh328d5752007-06-18 10:48:04 -07003313 return ret;
3314}
3315
3316static int ocfs2_try_to_merge_extent(struct inode *inode,
3317 handle_t *handle,
Tao Ma677b9752008-01-30 14:21:05 +08003318 struct ocfs2_path *path,
Mark Fasheh328d5752007-06-18 10:48:04 -07003319 int split_index,
3320 struct ocfs2_extent_rec *split_rec,
3321 struct ocfs2_cached_dealloc_ctxt *dealloc,
Tao Mae7d4cb62008-08-18 17:38:44 +08003322 struct ocfs2_merge_ctxt *ctxt,
3323 struct ocfs2_extent_tree *et)
Mark Fasheh328d5752007-06-18 10:48:04 -07003324
3325{
Tao Mao518d7262007-08-28 17:25:35 -07003326 int ret = 0;
Tao Ma677b9752008-01-30 14:21:05 +08003327 struct ocfs2_extent_list *el = path_leaf_el(path);
Mark Fasheh328d5752007-06-18 10:48:04 -07003328 struct ocfs2_extent_rec *rec = &el->l_recs[split_index];
3329
3330 BUG_ON(ctxt->c_contig_type == CONTIG_NONE);
3331
Tao Mao518d7262007-08-28 17:25:35 -07003332 if (ctxt->c_split_covers_rec && ctxt->c_has_empty_extent) {
3333 /*
3334 * The merge code will need to create an empty
3335 * extent to take the place of the newly
3336 * emptied slot. Remove any pre-existing empty
3337 * extents - having more than one in a leaf is
3338 * illegal.
3339 */
Tao Ma677b9752008-01-30 14:21:05 +08003340 ret = ocfs2_rotate_tree_left(inode, handle, path,
Tao Mae7d4cb62008-08-18 17:38:44 +08003341 dealloc, et);
Tao Mao518d7262007-08-28 17:25:35 -07003342 if (ret) {
3343 mlog_errno(ret);
3344 goto out;
Mark Fasheh328d5752007-06-18 10:48:04 -07003345 }
Tao Mao518d7262007-08-28 17:25:35 -07003346 split_index--;
3347 rec = &el->l_recs[split_index];
Mark Fasheh328d5752007-06-18 10:48:04 -07003348 }
3349
3350 if (ctxt->c_contig_type == CONTIG_LEFTRIGHT) {
3351 /*
3352 * Left-right contig implies this.
3353 */
3354 BUG_ON(!ctxt->c_split_covers_rec);
Mark Fasheh328d5752007-06-18 10:48:04 -07003355
3356 /*
3357 * Since the leftright insert always covers the entire
3358 * extent, this call will delete the insert record
3359 * entirely, resulting in an empty extent record added to
3360 * the extent block.
3361 *
3362 * Since the adding of an empty extent shifts
3363 * everything back to the right, there's no need to
3364 * update split_index here.
Tao Ma677b9752008-01-30 14:21:05 +08003365 *
3366 * When the split_index is zero, we need to merge it into the
3367 * previous extent block. It is more efficient and easier
3368 * if we do merge_right first and merge_left later.
Mark Fasheh328d5752007-06-18 10:48:04 -07003369 */
Tao Ma677b9752008-01-30 14:21:05 +08003370 ret = ocfs2_merge_rec_right(inode, path,
3371 handle, split_rec,
3372 split_index);
Mark Fasheh328d5752007-06-18 10:48:04 -07003373 if (ret) {
3374 mlog_errno(ret);
3375 goto out;
3376 }
3377
3378 /*
3379 * We can only get this from logic error above.
3380 */
3381 BUG_ON(!ocfs2_is_empty_extent(&el->l_recs[0]));
3382
Tao Ma677b9752008-01-30 14:21:05 +08003383 /* The merge left us with an empty extent, remove it. */
Tao Mae7d4cb62008-08-18 17:38:44 +08003384 ret = ocfs2_rotate_tree_left(inode, handle, path,
3385 dealloc, et);
Mark Fasheh328d5752007-06-18 10:48:04 -07003386 if (ret) {
3387 mlog_errno(ret);
3388 goto out;
3389 }
Tao Ma677b9752008-01-30 14:21:05 +08003390
Mark Fasheh328d5752007-06-18 10:48:04 -07003391 rec = &el->l_recs[split_index];
3392
3393 /*
3394 * Note that we don't pass split_rec here on purpose -
Tao Ma677b9752008-01-30 14:21:05 +08003395 * we've merged it into the rec already.
Mark Fasheh328d5752007-06-18 10:48:04 -07003396 */
Tao Ma677b9752008-01-30 14:21:05 +08003397 ret = ocfs2_merge_rec_left(inode, path,
3398 handle, rec,
Tao Mae7d4cb62008-08-18 17:38:44 +08003399 dealloc, et,
Tao Ma677b9752008-01-30 14:21:05 +08003400 split_index);
3401
Mark Fasheh328d5752007-06-18 10:48:04 -07003402 if (ret) {
3403 mlog_errno(ret);
3404 goto out;
3405 }
3406
Tao Ma677b9752008-01-30 14:21:05 +08003407 ret = ocfs2_rotate_tree_left(inode, handle, path,
Tao Mae7d4cb62008-08-18 17:38:44 +08003408 dealloc, et);
Mark Fasheh328d5752007-06-18 10:48:04 -07003409 /*
3410 * Error from this last rotate is not critical, so
3411 * print but don't bubble it up.
3412 */
3413 if (ret)
3414 mlog_errno(ret);
3415 ret = 0;
3416 } else {
3417 /*
3418 * Merge a record to the left or right.
3419 *
3420 * 'contig_type' is relative to the existing record,
3421 * so for example, if we're "right contig", it's to
3422 * the record on the left (hence the left merge).
3423 */
3424 if (ctxt->c_contig_type == CONTIG_RIGHT) {
3425 ret = ocfs2_merge_rec_left(inode,
Tao Ma677b9752008-01-30 14:21:05 +08003426 path,
3427 handle, split_rec,
Tao Mae7d4cb62008-08-18 17:38:44 +08003428 dealloc, et,
Mark Fasheh328d5752007-06-18 10:48:04 -07003429 split_index);
3430 if (ret) {
3431 mlog_errno(ret);
3432 goto out;
3433 }
3434 } else {
3435 ret = ocfs2_merge_rec_right(inode,
Tao Ma677b9752008-01-30 14:21:05 +08003436 path,
3437 handle, split_rec,
Mark Fasheh328d5752007-06-18 10:48:04 -07003438 split_index);
3439 if (ret) {
3440 mlog_errno(ret);
3441 goto out;
3442 }
3443 }
3444
3445 if (ctxt->c_split_covers_rec) {
3446 /*
3447 * The merge may have left an empty extent in
3448 * our leaf. Try to rotate it away.
3449 */
Tao Ma677b9752008-01-30 14:21:05 +08003450 ret = ocfs2_rotate_tree_left(inode, handle, path,
Tao Mae7d4cb62008-08-18 17:38:44 +08003451 dealloc, et);
Mark Fasheh328d5752007-06-18 10:48:04 -07003452 if (ret)
3453 mlog_errno(ret);
3454 ret = 0;
3455 }
3456 }
3457
3458out:
3459 return ret;
3460}
3461
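/*
 * Shrink 'rec' by the region described in 'split_rec'. The split
 * region is expected to lie on either the left or the right edge of
 * the existing record; the matching edge is trimmed by the split
 * length, with e_cpos and e_blkno advanced for a left split.
 */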
3462static void ocfs2_subtract_from_rec(struct super_block *sb,
3463 enum ocfs2_split_type split,
3464 struct ocfs2_extent_rec *rec,
3465 struct ocfs2_extent_rec *split_rec)
3466{
3467 u64 len_blocks;
3468
3469 len_blocks = ocfs2_clusters_to_blocks(sb,
3470 le16_to_cpu(split_rec->e_leaf_clusters));
3471
3472 if (split == SPLIT_LEFT) {
3473 /*
3474 * Region is on the left edge of the existing
3475 * record.
3476 */
3477 le32_add_cpu(&rec->e_cpos,
3478 le16_to_cpu(split_rec->e_leaf_clusters));
3479 le64_add_cpu(&rec->e_blkno, len_blocks);
3480 le16_add_cpu(&rec->e_leaf_clusters,
3481 -le16_to_cpu(split_rec->e_leaf_clusters));
3482 } else {
3483 /*
3484 * Region is on the right edge of the existing
3485 * record.
3486 */
3487 le16_add_cpu(&rec->e_leaf_clusters,
3488 -le16_to_cpu(split_rec->e_leaf_clusters));
3489 }
3490}
3491
Mark Fashehdcd05382007-01-16 11:32:23 -08003492/*
3493 * Do the final bits of extent record insertion at the target leaf
3494 * list. If this leaf is part of an allocation tree, it is assumed
3495 * that the tree above has been prepared.
3496 */
3497static void ocfs2_insert_at_leaf(struct ocfs2_extent_rec *insert_rec,
3498 struct ocfs2_extent_list *el,
3499 struct ocfs2_insert_type *insert,
3500 struct inode *inode)
3501{
3502 int i = insert->ins_contig_index;
3503 unsigned int range;
3504 struct ocfs2_extent_rec *rec;
3505
Mark Fashehe48edee2007-03-07 16:46:57 -08003506 BUG_ON(le16_to_cpu(el->l_tree_depth) != 0);
Mark Fashehdcd05382007-01-16 11:32:23 -08003507
Mark Fasheh328d5752007-06-18 10:48:04 -07003508 if (insert->ins_split != SPLIT_NONE) {
3509 i = ocfs2_search_extent_list(el, le32_to_cpu(insert_rec->e_cpos));
3510 BUG_ON(i == -1);
3511 rec = &el->l_recs[i];
3512 ocfs2_subtract_from_rec(inode->i_sb, insert->ins_split, rec,
3513 insert_rec);
3514 goto rotate;
3515 }
3516
Mark Fashehdcd05382007-01-16 11:32:23 -08003517 /*
3518 * Contiguous insert - either left or right.
3519 */
3520 if (insert->ins_contig != CONTIG_NONE) {
3521 rec = &el->l_recs[i];
3522 if (insert->ins_contig == CONTIG_LEFT) {
3523 rec->e_blkno = insert_rec->e_blkno;
3524 rec->e_cpos = insert_rec->e_cpos;
3525 }
Mark Fashehe48edee2007-03-07 16:46:57 -08003526 le16_add_cpu(&rec->e_leaf_clusters,
3527 le16_to_cpu(insert_rec->e_leaf_clusters));
Mark Fashehdcd05382007-01-16 11:32:23 -08003528 return;
3529 }
3530
3531 /*
3532 * Handle insert into an empty leaf.
3533 */
3534 if (le16_to_cpu(el->l_next_free_rec) == 0 ||
3535 ((le16_to_cpu(el->l_next_free_rec) == 1) &&
3536 ocfs2_is_empty_extent(&el->l_recs[0]))) {
3537 el->l_recs[0] = *insert_rec;
3538 el->l_next_free_rec = cpu_to_le16(1);
3539 return;
3540 }
3541
3542 /*
3543 * Appending insert.
3544 */
3545 if (insert->ins_appending == APPEND_TAIL) {
3546 i = le16_to_cpu(el->l_next_free_rec) - 1;
3547 rec = &el->l_recs[i];
Mark Fashehe48edee2007-03-07 16:46:57 -08003548 range = le32_to_cpu(rec->e_cpos)
3549 + le16_to_cpu(rec->e_leaf_clusters);
Mark Fashehdcd05382007-01-16 11:32:23 -08003550 BUG_ON(le32_to_cpu(insert_rec->e_cpos) < range);
3551
3552 mlog_bug_on_msg(le16_to_cpu(el->l_next_free_rec) >=
3553 le16_to_cpu(el->l_count),
3554 "inode %lu, depth %u, count %u, next free %u, "
3555 "rec.cpos %u, rec.clusters %u, "
3556 "insert.cpos %u, insert.clusters %u\n",
3557 inode->i_ino,
3558 le16_to_cpu(el->l_tree_depth),
3559 le16_to_cpu(el->l_count),
3560 le16_to_cpu(el->l_next_free_rec),
3561 le32_to_cpu(el->l_recs[i].e_cpos),
Mark Fashehe48edee2007-03-07 16:46:57 -08003562 le16_to_cpu(el->l_recs[i].e_leaf_clusters),
Mark Fashehdcd05382007-01-16 11:32:23 -08003563 le32_to_cpu(insert_rec->e_cpos),
Mark Fashehe48edee2007-03-07 16:46:57 -08003564 le16_to_cpu(insert_rec->e_leaf_clusters));
Mark Fashehdcd05382007-01-16 11:32:23 -08003565 i++;
3566 el->l_recs[i] = *insert_rec;
3567 le16_add_cpu(&el->l_next_free_rec, 1);
3568 return;
3569 }
3570
Mark Fasheh328d5752007-06-18 10:48:04 -07003571rotate:
Mark Fashehdcd05382007-01-16 11:32:23 -08003572 /*
3573 * Ok, we have to rotate.
3574 *
3575 * At this point, it is safe to assume that inserting into an
3576 * empty leaf and appending to a leaf have both been handled
3577 * above.
3578 *
3579 * This leaf needs to have space, either by the empty 1st
3580	 * extent record, or by virtue of an l_next_free_rec < l_count.
3581 */
3582 ocfs2_rotate_leaf(el, insert_rec);
3583}
3584
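/*
 * Stretch the rightmost record of every interior node along 'path' so
 * that it covers 'insert_rec'. This keeps the interior cluster counts
 * consistent for a rightmost (appending) insert; the leaf itself is
 * updated by the caller.
 */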
Mark Fasheh328d5752007-06-18 10:48:04 -07003585static void ocfs2_adjust_rightmost_records(struct inode *inode,
3586 handle_t *handle,
3587 struct ocfs2_path *path,
3588 struct ocfs2_extent_rec *insert_rec)
3589{
3590 int ret, i, next_free;
3591 struct buffer_head *bh;
3592 struct ocfs2_extent_list *el;
3593 struct ocfs2_extent_rec *rec;
3594
3595 /*
3596 * Update everything except the leaf block.
3597 */
3598 for (i = 0; i < path->p_tree_depth; i++) {
3599 bh = path->p_node[i].bh;
3600 el = path->p_node[i].el;
3601
3602 next_free = le16_to_cpu(el->l_next_free_rec);
3603 if (next_free == 0) {
3604 ocfs2_error(inode->i_sb,
3605 "Dinode %llu has a bad extent list",
3606 (unsigned long long)OCFS2_I(inode)->ip_blkno);
3607 ret = -EIO;
3608 return;
3609 }
3610
3611 rec = &el->l_recs[next_free - 1];
3612
3613 rec->e_int_clusters = insert_rec->e_cpos;
3614 le32_add_cpu(&rec->e_int_clusters,
3615 le16_to_cpu(insert_rec->e_leaf_clusters));
3616 le32_add_cpu(&rec->e_int_clusters,
3617 -le32_to_cpu(rec->e_cpos));
3618
3619 ret = ocfs2_journal_dirty(handle, bh);
3620 if (ret)
3621 mlog_errno(ret);
3622
3623 }
3624}
3625
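/*
 * Prepare the tree for an appending insert of 'insert_rec'. The
 * interior records along right_path are stretched to cover the new
 * clusters. If the append lands in the leftmost slot of the leaf, the
 * neighboring left path is looked up and passed back so the caller
 * can fix up its edge after the insert.
 */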
Mark Fashehdcd05382007-01-16 11:32:23 -08003626static int ocfs2_append_rec_to_path(struct inode *inode, handle_t *handle,
3627 struct ocfs2_extent_rec *insert_rec,
3628 struct ocfs2_path *right_path,
3629 struct ocfs2_path **ret_left_path)
3630{
Mark Fasheh328d5752007-06-18 10:48:04 -07003631 int ret, next_free;
Mark Fashehdcd05382007-01-16 11:32:23 -08003632 struct ocfs2_extent_list *el;
3633 struct ocfs2_path *left_path = NULL;
3634
3635 *ret_left_path = NULL;
3636
3637 /*
Mark Fashehe48edee2007-03-07 16:46:57 -08003638 * This shouldn't happen for non-trees. The extent rec cluster
3639 * count manipulation below only works for interior nodes.
3640 */
3641 BUG_ON(right_path->p_tree_depth == 0);
3642
3643 /*
Mark Fashehdcd05382007-01-16 11:32:23 -08003644 * If our appending insert is at the leftmost edge of a leaf,
3645 * then we might need to update the rightmost records of the
3646 * neighboring path.
3647 */
3648 el = path_leaf_el(right_path);
3649 next_free = le16_to_cpu(el->l_next_free_rec);
3650 if (next_free == 0 ||
3651 (next_free == 1 && ocfs2_is_empty_extent(&el->l_recs[0]))) {
3652 u32 left_cpos;
3653
3654 ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb, right_path,
3655 &left_cpos);
3656 if (ret) {
3657 mlog_errno(ret);
3658 goto out;
3659 }
3660
3661 mlog(0, "Append may need a left path update. cpos: %u, "
3662 "left_cpos: %u\n", le32_to_cpu(insert_rec->e_cpos),
3663 left_cpos);
3664
3665 /*
3666 * No need to worry if the append is already in the
3667 * leftmost leaf.
3668 */
3669 if (left_cpos) {
3670 left_path = ocfs2_new_path(path_root_bh(right_path),
3671 path_root_el(right_path));
3672 if (!left_path) {
3673 ret = -ENOMEM;
3674 mlog_errno(ret);
3675 goto out;
3676 }
3677
3678 ret = ocfs2_find_path(inode, left_path, left_cpos);
3679 if (ret) {
3680 mlog_errno(ret);
3681 goto out;
3682 }
3683
3684 /*
3685 * ocfs2_insert_path() will pass the left_path to the
3686 * journal for us.
3687 */
3688 }
3689 }
3690
3691 ret = ocfs2_journal_access_path(inode, handle, right_path);
3692 if (ret) {
3693 mlog_errno(ret);
3694 goto out;
3695 }
3696
Mark Fasheh328d5752007-06-18 10:48:04 -07003697 ocfs2_adjust_rightmost_records(inode, handle, right_path, insert_rec);
Mark Fashehdcd05382007-01-16 11:32:23 -08003698
3699 *ret_left_path = left_path;
3700 ret = 0;
3701out:
3702 if (ret != 0)
3703 ocfs2_free_path(left_path);
3704
3705 return ret;
3706}
3707
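/*
 * Place 'split_rec' into the correct leaf for a split insert. The
 * existing record is trimmed with ocfs2_subtract_from_rec() and the
 * new record is rotated into either the right leaf or, when the
 * record being split has crossed a leaf boundary, the left leaf.
 */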
Mark Fasheh328d5752007-06-18 10:48:04 -07003708static void ocfs2_split_record(struct inode *inode,
3709 struct ocfs2_path *left_path,
3710 struct ocfs2_path *right_path,
3711 struct ocfs2_extent_rec *split_rec,
3712 enum ocfs2_split_type split)
3713{
3714 int index;
3715 u32 cpos = le32_to_cpu(split_rec->e_cpos);
3716 struct ocfs2_extent_list *left_el = NULL, *right_el, *insert_el, *el;
3717 struct ocfs2_extent_rec *rec, *tmprec;
3718
3719	right_el = path_leaf_el(right_path);
3720 if (left_path)
3721 left_el = path_leaf_el(left_path);
3722
3723 el = right_el;
3724 insert_el = right_el;
3725 index = ocfs2_search_extent_list(el, cpos);
3726 if (index != -1) {
3727 if (index == 0 && left_path) {
3728 BUG_ON(ocfs2_is_empty_extent(&el->l_recs[0]));
3729
3730 /*
3731 * This typically means that the record
3732 * started in the left path but moved to the
3733 * right as a result of rotation. We either
3734 * move the existing record to the left, or we
3735 * do the later insert there.
3736 *
3737 * In this case, the left path should always
3738 * exist as the rotate code will have passed
3739 * it back for a post-insert update.
3740 */
3741
3742 if (split == SPLIT_LEFT) {
3743 /*
3744 * It's a left split. Since we know
3745 * that the rotate code gave us an
3746 * empty extent in the left path, we
3747 * can just do the insert there.
3748 */
3749 insert_el = left_el;
3750 } else {
3751 /*
3752 * Right split - we have to move the
3753 * existing record over to the left
3754 * leaf. The insert will be into the
3755 * newly created empty extent in the
3756 * right leaf.
3757 */
3758 tmprec = &right_el->l_recs[index];
3759 ocfs2_rotate_leaf(left_el, tmprec);
3760 el = left_el;
3761
3762 memset(tmprec, 0, sizeof(*tmprec));
3763 index = ocfs2_search_extent_list(left_el, cpos);
3764 BUG_ON(index == -1);
3765 }
3766 }
3767 } else {
3768 BUG_ON(!left_path);
3769 BUG_ON(!ocfs2_is_empty_extent(&left_el->l_recs[0]));
3770 /*
3771 * Left path is easy - we can just allow the insert to
3772 * happen.
3773 */
3774 el = left_el;
3775 insert_el = left_el;
3776 index = ocfs2_search_extent_list(el, cpos);
3777 BUG_ON(index == -1);
3778 }
3779
3780 rec = &el->l_recs[index];
3781 ocfs2_subtract_from_rec(inode->i_sb, split, rec, split_rec);
3782 ocfs2_rotate_leaf(insert_el, split_rec);
3783}
3784
Mark Fashehdcd05382007-01-16 11:32:23 -08003785/*
Tao Mae7d4cb62008-08-18 17:38:44 +08003786 * This function only does inserts on an allocation b-tree. For tree
3787 * depth = 0, ocfs2_insert_at_leaf() is called directly.
Mark Fashehdcd05382007-01-16 11:32:23 -08003788 *
3789 * right_path is the path we want to do the actual insert
3790 * in. left_path should only be passed in if we need to update that
3791 * portion of the tree after an edge insert.
3792 */
3793static int ocfs2_insert_path(struct inode *inode,
3794 handle_t *handle,
3795 struct ocfs2_path *left_path,
3796 struct ocfs2_path *right_path,
3797 struct ocfs2_extent_rec *insert_rec,
3798 struct ocfs2_insert_type *insert)
3799{
3800 int ret, subtree_index;
3801 struct buffer_head *leaf_bh = path_leaf_bh(right_path);
Mark Fashehdcd05382007-01-16 11:32:23 -08003802
Mark Fashehdcd05382007-01-16 11:32:23 -08003803 if (left_path) {
3804 int credits = handle->h_buffer_credits;
3805
3806 /*
3807 * There's a chance that left_path got passed back to
3808 * us without being accounted for in the
3809 * journal. Extend our transaction here to be sure we
3810 * can change those blocks.
3811 */
3812 credits += left_path->p_tree_depth;
3813
3814 ret = ocfs2_extend_trans(handle, credits);
3815 if (ret < 0) {
3816 mlog_errno(ret);
3817 goto out;
3818 }
3819
3820 ret = ocfs2_journal_access_path(inode, handle, left_path);
3821 if (ret < 0) {
3822 mlog_errno(ret);
3823 goto out;
3824 }
3825 }
3826
Mark Fashehe8aed342007-12-03 16:43:01 -08003827 /*
3828 * Pass both paths to the journal. The majority of inserts
3829 * will be touching all components anyway.
3830 */
3831 ret = ocfs2_journal_access_path(inode, handle, right_path);
3832 if (ret < 0) {
3833 mlog_errno(ret);
3834 goto out;
3835 }
3836
Mark Fasheh328d5752007-06-18 10:48:04 -07003837 if (insert->ins_split != SPLIT_NONE) {
3838 /*
3839 * We could call ocfs2_insert_at_leaf() for some types
Joe Perchesc78bad12008-02-03 17:33:42 +02003840 * of splits, but it's easier to just let one separate
Mark Fasheh328d5752007-06-18 10:48:04 -07003841 * function sort it all out.
3842 */
3843 ocfs2_split_record(inode, left_path, right_path,
3844 insert_rec, insert->ins_split);
Mark Fashehe8aed342007-12-03 16:43:01 -08003845
3846 /*
3847 * Split might have modified either leaf and we don't
3848 * have a guarantee that the later edge insert will
3849 * dirty this for us.
3850 */
3851 if (left_path)
3852 ret = ocfs2_journal_dirty(handle,
3853 path_leaf_bh(left_path));
3854 if (ret)
3855 mlog_errno(ret);
Mark Fasheh328d5752007-06-18 10:48:04 -07003856 } else
3857 ocfs2_insert_at_leaf(insert_rec, path_leaf_el(right_path),
3858 insert, inode);
Mark Fashehdcd05382007-01-16 11:32:23 -08003859
Mark Fashehdcd05382007-01-16 11:32:23 -08003860 ret = ocfs2_journal_dirty(handle, leaf_bh);
3861 if (ret)
3862 mlog_errno(ret);
3863
3864 if (left_path) {
3865 /*
3866 * The rotate code has indicated that we need to fix
3867 * up portions of the tree after the insert.
3868 *
3869 * XXX: Should we extend the transaction here?
3870 */
3871 subtree_index = ocfs2_find_subtree_root(inode, left_path,
3872 right_path);
3873 ocfs2_complete_edge_insert(inode, handle, left_path,
3874 right_path, subtree_index);
3875 }
3876
3877 ret = 0;
3878out:
3879 return ret;
3880}
3881
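/*
 * Top-level helper for inserting 'insert_rec' into an extent tree
 * that already has room for it. Depth-zero trees are handled by
 * inserting directly into the root list; otherwise the target leaf is
 * found, any rotation or append preparation is done, and
 * ocfs2_insert_path() completes the insert.
 */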
3882static int ocfs2_do_insert_extent(struct inode *inode,
3883 handle_t *handle,
Tao Mae7d4cb62008-08-18 17:38:44 +08003884 struct ocfs2_extent_tree *et,
Mark Fashehdcd05382007-01-16 11:32:23 -08003885 struct ocfs2_extent_rec *insert_rec,
3886 struct ocfs2_insert_type *type)
3887{
3888 int ret, rotate = 0;
3889 u32 cpos;
3890 struct ocfs2_path *right_path = NULL;
3891 struct ocfs2_path *left_path = NULL;
Mark Fashehdcd05382007-01-16 11:32:23 -08003892 struct ocfs2_extent_list *el;
3893
Joel Beckerce1d9ea2008-08-20 16:30:07 -07003894 el = et->et_root_el;
Mark Fashehdcd05382007-01-16 11:32:23 -08003895
Joel Beckerce1d9ea2008-08-20 16:30:07 -07003896 ret = ocfs2_journal_access(handle, inode, et->et_root_bh,
Mark Fashehdcd05382007-01-16 11:32:23 -08003897 OCFS2_JOURNAL_ACCESS_WRITE);
3898 if (ret) {
3899 mlog_errno(ret);
3900 goto out;
3901 }
3902
3903 if (le16_to_cpu(el->l_tree_depth) == 0) {
3904 ocfs2_insert_at_leaf(insert_rec, el, type, inode);
3905 goto out_update_clusters;
3906 }
3907
Joel Beckerce1d9ea2008-08-20 16:30:07 -07003908 right_path = ocfs2_new_path(et->et_root_bh, et->et_root_el);
Mark Fashehdcd05382007-01-16 11:32:23 -08003909 if (!right_path) {
3910 ret = -ENOMEM;
3911 mlog_errno(ret);
3912 goto out;
3913 }
3914
3915 /*
3916 * Determine the path to start with. Rotations need the
3917 * rightmost path, everything else can go directly to the
3918 * target leaf.
3919 */
3920 cpos = le32_to_cpu(insert_rec->e_cpos);
3921 if (type->ins_appending == APPEND_NONE &&
3922 type->ins_contig == CONTIG_NONE) {
3923 rotate = 1;
3924 cpos = UINT_MAX;
3925 }
3926
3927 ret = ocfs2_find_path(inode, right_path, cpos);
3928 if (ret) {
3929 mlog_errno(ret);
3930 goto out;
3931 }
3932
3933 /*
3934 * Rotations and appends need special treatment - they modify
3935	 * parts of the tree above them.
3936 *
3937	 * Both might pass back a path immediately to the left of the
3938	 * one being inserted to. This will cause
3939 * ocfs2_insert_path() to modify the rightmost records of
3940 * left_path to account for an edge insert.
3941 *
3942 * XXX: When modifying this code, keep in mind that an insert
3943 * can wind up skipping both of these two special cases...
3944 */
3945 if (rotate) {
Mark Fasheh328d5752007-06-18 10:48:04 -07003946 ret = ocfs2_rotate_tree_right(inode, handle, type->ins_split,
Mark Fashehdcd05382007-01-16 11:32:23 -08003947 le32_to_cpu(insert_rec->e_cpos),
3948 right_path, &left_path);
3949 if (ret) {
3950 mlog_errno(ret);
3951 goto out;
3952 }
Mark Fashehe8aed342007-12-03 16:43:01 -08003953
3954 /*
3955 * ocfs2_rotate_tree_right() might have extended the
3956 * transaction without re-journaling our tree root.
3957 */
Joel Beckerce1d9ea2008-08-20 16:30:07 -07003958 ret = ocfs2_journal_access(handle, inode, et->et_root_bh,
Mark Fashehe8aed342007-12-03 16:43:01 -08003959 OCFS2_JOURNAL_ACCESS_WRITE);
3960 if (ret) {
3961 mlog_errno(ret);
3962 goto out;
3963 }
Mark Fashehdcd05382007-01-16 11:32:23 -08003964 } else if (type->ins_appending == APPEND_TAIL
3965 && type->ins_contig != CONTIG_LEFT) {
3966 ret = ocfs2_append_rec_to_path(inode, handle, insert_rec,
3967 right_path, &left_path);
3968 if (ret) {
3969 mlog_errno(ret);
3970 goto out;
3971 }
3972 }
3973
3974 ret = ocfs2_insert_path(inode, handle, left_path, right_path,
3975 insert_rec, type);
3976 if (ret) {
3977 mlog_errno(ret);
3978 goto out;
3979 }
3980
3981out_update_clusters:
Mark Fasheh328d5752007-06-18 10:48:04 -07003982 if (type->ins_split == SPLIT_NONE)
Joel Becker35dc0aa2008-08-20 16:25:06 -07003983 ocfs2_et_update_clusters(inode, et,
3984 le16_to_cpu(insert_rec->e_leaf_clusters));
Mark Fashehdcd05382007-01-16 11:32:23 -08003985
Joel Beckerce1d9ea2008-08-20 16:30:07 -07003986 ret = ocfs2_journal_dirty(handle, et->et_root_bh);
Mark Fashehdcd05382007-01-16 11:32:23 -08003987 if (ret)
3988 mlog_errno(ret);
3989
3990out:
3991 ocfs2_free_path(left_path);
3992 ocfs2_free_path(right_path);
3993
3994 return ret;
3995}
3996
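/*
 * Figure out whether 'split_rec' can merge with the records on either
 * side of the one at 'index'. The neighboring records may live in
 * adjacent leaves, so this may walk a path to the left or right of
 * 'path' to find them. Returns CONTIG_LEFTRIGHT when both sides are
 * mergeable.
 */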
Mark Fasheh328d5752007-06-18 10:48:04 -07003997static enum ocfs2_contig_type
Tao Maad5a4d72008-01-30 14:21:32 +08003998ocfs2_figure_merge_contig_type(struct inode *inode, struct ocfs2_path *path,
Mark Fasheh328d5752007-06-18 10:48:04 -07003999 struct ocfs2_extent_list *el, int index,
4000 struct ocfs2_extent_rec *split_rec)
4001{
Tao Maad5a4d72008-01-30 14:21:32 +08004002 int status;
Mark Fasheh328d5752007-06-18 10:48:04 -07004003 enum ocfs2_contig_type ret = CONTIG_NONE;
Tao Maad5a4d72008-01-30 14:21:32 +08004004 u32 left_cpos, right_cpos;
4005 struct ocfs2_extent_rec *rec = NULL;
4006 struct ocfs2_extent_list *new_el;
4007 struct ocfs2_path *left_path = NULL, *right_path = NULL;
4008 struct buffer_head *bh;
4009 struct ocfs2_extent_block *eb;
4010
4011 if (index > 0) {
4012 rec = &el->l_recs[index - 1];
4013 } else if (path->p_tree_depth > 0) {
4014 status = ocfs2_find_cpos_for_left_leaf(inode->i_sb,
4015 path, &left_cpos);
4016 if (status)
4017 goto out;
4018
4019 if (left_cpos != 0) {
4020 left_path = ocfs2_new_path(path_root_bh(path),
4021 path_root_el(path));
4022 if (!left_path)
4023 goto out;
4024
4025 status = ocfs2_find_path(inode, left_path, left_cpos);
4026 if (status)
4027 goto out;
4028
4029 new_el = path_leaf_el(left_path);
4030
4031 if (le16_to_cpu(new_el->l_next_free_rec) !=
4032 le16_to_cpu(new_el->l_count)) {
4033 bh = path_leaf_bh(left_path);
4034 eb = (struct ocfs2_extent_block *)bh->b_data;
4035 OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb,
4036 eb);
4037 goto out;
4038 }
4039 rec = &new_el->l_recs[
4040 le16_to_cpu(new_el->l_next_free_rec) - 1];
4041 }
4042 }
Mark Fasheh328d5752007-06-18 10:48:04 -07004043
4044 /*
4045 * We're careful to check for an empty extent record here -
4046 * the merge code will know what to do if it sees one.
4047 */
Tao Maad5a4d72008-01-30 14:21:32 +08004048 if (rec) {
Mark Fasheh328d5752007-06-18 10:48:04 -07004049 if (index == 1 && ocfs2_is_empty_extent(rec)) {
4050 if (split_rec->e_cpos == el->l_recs[index].e_cpos)
4051 ret = CONTIG_RIGHT;
4052 } else {
4053 ret = ocfs2_extent_contig(inode, rec, split_rec);
4054 }
4055 }
4056
Tao Maad5a4d72008-01-30 14:21:32 +08004057 rec = NULL;
4058 if (index < (le16_to_cpu(el->l_next_free_rec) - 1))
4059 rec = &el->l_recs[index + 1];
4060 else if (le16_to_cpu(el->l_next_free_rec) == le16_to_cpu(el->l_count) &&
4061 path->p_tree_depth > 0) {
4062 status = ocfs2_find_cpos_for_right_leaf(inode->i_sb,
4063 path, &right_cpos);
4064 if (status)
4065 goto out;
4066
4067 if (right_cpos == 0)
4068 goto out;
4069
4070 right_path = ocfs2_new_path(path_root_bh(path),
4071 path_root_el(path));
4072 if (!right_path)
4073 goto out;
4074
4075 status = ocfs2_find_path(inode, right_path, right_cpos);
4076 if (status)
4077 goto out;
4078
4079 new_el = path_leaf_el(right_path);
4080 rec = &new_el->l_recs[0];
4081 if (ocfs2_is_empty_extent(rec)) {
4082 if (le16_to_cpu(new_el->l_next_free_rec) <= 1) {
4083 bh = path_leaf_bh(right_path);
4084 eb = (struct ocfs2_extent_block *)bh->b_data;
4085 OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb,
4086 eb);
4087 goto out;
4088 }
4089 rec = &new_el->l_recs[1];
4090 }
4091 }
4092
4093 if (rec) {
Mark Fasheh328d5752007-06-18 10:48:04 -07004094 enum ocfs2_contig_type contig_type;
4095
Mark Fasheh328d5752007-06-18 10:48:04 -07004096 contig_type = ocfs2_extent_contig(inode, rec, split_rec);
4097
4098 if (contig_type == CONTIG_LEFT && ret == CONTIG_RIGHT)
4099 ret = CONTIG_LEFTRIGHT;
4100 else if (ret == CONTIG_NONE)
4101 ret = contig_type;
4102 }
4103
Tao Maad5a4d72008-01-30 14:21:32 +08004104out:
4105 if (left_path)
4106 ocfs2_free_path(left_path);
4107 if (right_path)
4108 ocfs2_free_path(right_path);
4109
Mark Fasheh328d5752007-06-18 10:48:04 -07004110 return ret;
4111}
4112
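/*
 * Scan the leaf for a record that 'insert_rec' is contiguous with and
 * remember its index in 'insert'. Contiguity is discarded again if
 * the merged record would exceed et->et_max_leaf_clusters.
 */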
Mark Fashehdcd05382007-01-16 11:32:23 -08004113static void ocfs2_figure_contig_type(struct inode *inode,
4114 struct ocfs2_insert_type *insert,
4115 struct ocfs2_extent_list *el,
Tao Maca12b7c2008-08-18 17:38:52 +08004116 struct ocfs2_extent_rec *insert_rec,
4117 struct ocfs2_extent_tree *et)
Mark Fashehdcd05382007-01-16 11:32:23 -08004118{
4119 int i;
4120 enum ocfs2_contig_type contig_type = CONTIG_NONE;
4121
Mark Fashehe48edee2007-03-07 16:46:57 -08004122 BUG_ON(le16_to_cpu(el->l_tree_depth) != 0);
4123
Mark Fashehdcd05382007-01-16 11:32:23 -08004124	for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) {
4125 contig_type = ocfs2_extent_contig(inode, &el->l_recs[i],
4126 insert_rec);
4127 if (contig_type != CONTIG_NONE) {
4128 insert->ins_contig_index = i;
4129 break;
4130 }
4131 }
4132 insert->ins_contig = contig_type;
Tao Maca12b7c2008-08-18 17:38:52 +08004133
4134 if (insert->ins_contig != CONTIG_NONE) {
4135 struct ocfs2_extent_rec *rec =
4136 &el->l_recs[insert->ins_contig_index];
4137 unsigned int len = le16_to_cpu(rec->e_leaf_clusters) +
4138 le16_to_cpu(insert_rec->e_leaf_clusters);
4139
4140 /*
4141 * Caller might want us to limit the size of extents, don't
4142 * calculate contiguousness if we might exceed that limit.
4143 */
Joel Beckerce1d9ea2008-08-20 16:30:07 -07004144 if (et->et_max_leaf_clusters &&
4145 (len > et->et_max_leaf_clusters))
Tao Maca12b7c2008-08-18 17:38:52 +08004146 insert->ins_contig = CONTIG_NONE;
4147 }
Mark Fashehdcd05382007-01-16 11:32:23 -08004148}
4149
4150/*
4151 * This should only be called against the rightmost leaf extent list.
4152 *
4153 * ocfs2_figure_appending_type() will figure out whether we'll have to
4154 * insert at the tail of the rightmost leaf.
4155 *
Tao Mae7d4cb62008-08-18 17:38:44 +08004156 * This should also work against the root extent list for trees with 0
4157 * depth. If we consider the root extent list to be the rightmost leaf node
Mark Fashehdcd05382007-01-16 11:32:23 -08004158 * then the logic here makes sense.
4159 */
4160static void ocfs2_figure_appending_type(struct ocfs2_insert_type *insert,
4161 struct ocfs2_extent_list *el,
4162 struct ocfs2_extent_rec *insert_rec)
4163{
4164 int i;
4165 u32 cpos = le32_to_cpu(insert_rec->e_cpos);
4166 struct ocfs2_extent_rec *rec;
4167
4168 insert->ins_appending = APPEND_NONE;
4169
Mark Fashehe48edee2007-03-07 16:46:57 -08004170 BUG_ON(le16_to_cpu(el->l_tree_depth) != 0);
Mark Fashehdcd05382007-01-16 11:32:23 -08004171
4172 if (!el->l_next_free_rec)
4173 goto set_tail_append;
4174
4175 if (ocfs2_is_empty_extent(&el->l_recs[0])) {
4176 /* Were all records empty? */
4177 if (le16_to_cpu(el->l_next_free_rec) == 1)
4178 goto set_tail_append;
4179 }
4180
4181 i = le16_to_cpu(el->l_next_free_rec) - 1;
4182 rec = &el->l_recs[i];
4183
Mark Fashehe48edee2007-03-07 16:46:57 -08004184 if (cpos >=
4185 (le32_to_cpu(rec->e_cpos) + le16_to_cpu(rec->e_leaf_clusters)))
Mark Fashehdcd05382007-01-16 11:32:23 -08004186 goto set_tail_append;
4187
4188 return;
4189
4190set_tail_append:
4191 insert->ins_appending = APPEND_TAIL;
4192}
4193
4194/*
4195 * Helper function called at the beginning of an insert.
4196 *
4197 * This computes a few things that are commonly used in the process of
4198 * inserting into the btree:
4199 * - Whether the new extent is contiguous with an existing one.
4200 * - The current tree depth.
4201 * - Whether the insert is an appending one.
4202 * - The total # of free records in the tree.
4203 *
4204 * All of the information is stored on the ocfs2_insert_type
4205 * structure.
4206 */
4207static int ocfs2_figure_insert_type(struct inode *inode,
Tao Mae7d4cb62008-08-18 17:38:44 +08004208 struct ocfs2_extent_tree *et,
Mark Fashehdcd05382007-01-16 11:32:23 -08004209 struct buffer_head **last_eb_bh,
4210 struct ocfs2_extent_rec *insert_rec,
Tao Maoc77534f2007-08-28 17:22:33 -07004211 int *free_records,
Mark Fashehdcd05382007-01-16 11:32:23 -08004212 struct ocfs2_insert_type *insert)
4213{
4214 int ret;
Mark Fashehdcd05382007-01-16 11:32:23 -08004215 struct ocfs2_extent_block *eb;
4216 struct ocfs2_extent_list *el;
4217 struct ocfs2_path *path = NULL;
4218 struct buffer_head *bh = NULL;
4219
Mark Fasheh328d5752007-06-18 10:48:04 -07004220 insert->ins_split = SPLIT_NONE;
4221
Joel Beckerce1d9ea2008-08-20 16:30:07 -07004222 el = et->et_root_el;
Mark Fashehdcd05382007-01-16 11:32:23 -08004223 insert->ins_tree_depth = le16_to_cpu(el->l_tree_depth);
4224
4225 if (el->l_tree_depth) {
4226 /*
4227 * If we have tree depth, we read in the
4228 * rightmost extent block ahead of time as
4229 * ocfs2_figure_insert_type() and ocfs2_add_branch()
4230 * may want it later.
4231 */
4232 ret = ocfs2_read_block(OCFS2_SB(inode->i_sb),
Joel Becker35dc0aa2008-08-20 16:25:06 -07004233 ocfs2_et_get_last_eb_blk(et), &bh,
Mark Fashehdcd05382007-01-16 11:32:23 -08004234 OCFS2_BH_CACHED, inode);
4235 if (ret) {
4236 mlog_exit(ret);
4237 goto out;
4238 }
4239 eb = (struct ocfs2_extent_block *) bh->b_data;
4240 el = &eb->h_list;
4241 }
4242
4243 /*
4244 * Unless we have a contiguous insert, we'll need to know if
4245 * there is room left in our allocation tree for another
4246 * extent record.
4247 *
4248 * XXX: This test is simplistic, we can search for empty
4249 * extent records too.
4250 */
Tao Maoc77534f2007-08-28 17:22:33 -07004251 *free_records = le16_to_cpu(el->l_count) -
Mark Fashehdcd05382007-01-16 11:32:23 -08004252 le16_to_cpu(el->l_next_free_rec);
4253
4254 if (!insert->ins_tree_depth) {
Tao Maca12b7c2008-08-18 17:38:52 +08004255 ocfs2_figure_contig_type(inode, insert, el, insert_rec, et);
Mark Fashehdcd05382007-01-16 11:32:23 -08004256 ocfs2_figure_appending_type(insert, el, insert_rec);
4257 return 0;
4258 }
4259
Joel Beckerce1d9ea2008-08-20 16:30:07 -07004260 path = ocfs2_new_path(et->et_root_bh, et->et_root_el);
Mark Fashehdcd05382007-01-16 11:32:23 -08004261 if (!path) {
4262 ret = -ENOMEM;
4263 mlog_errno(ret);
4264 goto out;
4265 }
4266
4267 /*
4268 * In the case that we're inserting past what the tree
4269 * currently accounts for, ocfs2_find_path() will return for
4270 * us the rightmost tree path. This is accounted for below in
4271 * the appending code.
4272 */
4273 ret = ocfs2_find_path(inode, path, le32_to_cpu(insert_rec->e_cpos));
4274 if (ret) {
4275 mlog_errno(ret);
4276 goto out;
4277 }
4278
4279 el = path_leaf_el(path);
4280
4281 /*
4282	 * Now that we have the path, there are two things we want to determine:
4283 * 1) Contiguousness (also set contig_index if this is so)
4284 *
4285 * 2) Are we doing an append? We can trivially break this up
4286 * into two types of appends: simple record append, or a
4287 * rotate inside the tail leaf.
4288 */
Tao Maca12b7c2008-08-18 17:38:52 +08004289 ocfs2_figure_contig_type(inode, insert, el, insert_rec, et);
Mark Fashehdcd05382007-01-16 11:32:23 -08004290
4291 /*
4292 * The insert code isn't quite ready to deal with all cases of
4293 * left contiguousness. Specifically, if it's an insert into
4294 * the 1st record in a leaf, it will require the adjustment of
Mark Fashehe48edee2007-03-07 16:46:57 -08004295	 * cluster count on the last record of the path directly to its
Mark Fashehdcd05382007-01-16 11:32:23 -08004296 * left. For now, just catch that case and fool the layers
4297 * above us. This works just fine for tree_depth == 0, which
4298 * is why we allow that above.
4299 */
4300 if (insert->ins_contig == CONTIG_LEFT &&
4301 insert->ins_contig_index == 0)
4302 insert->ins_contig = CONTIG_NONE;
4303
4304 /*
4305 * Ok, so we can simply compare against last_eb to figure out
4306 * whether the path doesn't exist. This will only happen in
4307 * the case that we're doing a tail append, so maybe we can
4308 * take advantage of that information somehow.
4309 */
Joel Becker35dc0aa2008-08-20 16:25:06 -07004310 if (ocfs2_et_get_last_eb_blk(et) ==
Tao Mae7d4cb62008-08-18 17:38:44 +08004311 path_leaf_bh(path)->b_blocknr) {
Mark Fashehdcd05382007-01-16 11:32:23 -08004312 /*
4313 * Ok, ocfs2_find_path() returned us the rightmost
4314 * tree path. This might be an appending insert. There are
4315 * two cases:
4316 * 1) We're doing a true append at the tail:
4317 * -This might even be off the end of the leaf
4318 * 2) We're "appending" by rotating in the tail
4319 */
4320 ocfs2_figure_appending_type(insert, el, insert_rec);
4321 }
4322
4323out:
4324 ocfs2_free_path(path);
4325
4326 if (ret == 0)
4327 *last_eb_bh = bh;
4328 else
4329 brelse(bh);
4330 return ret;
4331}
4332
4333/*
4334 * Insert an extent into an inode btree.
4335 *
4336 * The caller needs to update fe->i_clusters
4337 */
Tao Maf56654c2008-08-18 17:38:48 +08004338static int ocfs2_insert_extent(struct ocfs2_super *osb,
4339 handle_t *handle,
4340 struct inode *inode,
4341 struct buffer_head *root_bh,
4342 u32 cpos,
4343 u64 start_blk,
4344 u32 new_clusters,
4345 u8 flags,
4346 struct ocfs2_alloc_context *meta_ac,
4347 struct ocfs2_extent_tree *et)
Mark Fashehccd979b2005-12-15 14:31:24 -08004348{
Mark Fashehc3afcbb2007-05-29 14:28:51 -07004349 int status;
Tao Maoc77534f2007-08-28 17:22:33 -07004350 int uninitialized_var(free_records);
Mark Fashehccd979b2005-12-15 14:31:24 -08004351 struct buffer_head *last_eb_bh = NULL;
Mark Fashehdcd05382007-01-16 11:32:23 -08004352 struct ocfs2_insert_type insert = {0, };
4353 struct ocfs2_extent_rec rec;
Mark Fashehccd979b2005-12-15 14:31:24 -08004354
Mark Fasheh1afc32b2007-09-07 14:46:51 -07004355 BUG_ON(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL);
4356
Mark Fashehdcd05382007-01-16 11:32:23 -08004357 mlog(0, "add %u clusters at position %u to inode %llu\n",
4358 new_clusters, cpos, (unsigned long long)OCFS2_I(inode)->ip_blkno);
Mark Fashehccd979b2005-12-15 14:31:24 -08004359
Mark Fashehdcd05382007-01-16 11:32:23 -08004360 mlog_bug_on_msg(!ocfs2_sparse_alloc(osb) &&
4361 (OCFS2_I(inode)->ip_clusters != cpos),
4362 "Device %s, asking for sparse allocation: inode %llu, "
4363 "cpos %u, clusters %u\n",
4364 osb->dev_str,
4365 (unsigned long long)OCFS2_I(inode)->ip_blkno, cpos,
4366 OCFS2_I(inode)->ip_clusters);
Mark Fashehccd979b2005-12-15 14:31:24 -08004367
Mark Fashehe48edee2007-03-07 16:46:57 -08004368 memset(&rec, 0, sizeof(rec));
Mark Fashehdcd05382007-01-16 11:32:23 -08004369 rec.e_cpos = cpu_to_le32(cpos);
4370 rec.e_blkno = cpu_to_le64(start_blk);
Mark Fashehe48edee2007-03-07 16:46:57 -08004371 rec.e_leaf_clusters = cpu_to_le16(new_clusters);
Mark Fasheh2ae99a62007-03-09 16:43:28 -08004372 rec.e_flags = flags;
Mark Fashehccd979b2005-12-15 14:31:24 -08004373
Tao Mae7d4cb62008-08-18 17:38:44 +08004374 status = ocfs2_figure_insert_type(inode, et, &last_eb_bh, &rec,
Tao Maoc77534f2007-08-28 17:22:33 -07004375 &free_records, &insert);
Mark Fashehdcd05382007-01-16 11:32:23 -08004376 if (status < 0) {
4377 mlog_errno(status);
4378 goto bail;
Mark Fashehccd979b2005-12-15 14:31:24 -08004379 }
4380
Mark Fashehdcd05382007-01-16 11:32:23 -08004381 mlog(0, "Insert.appending: %u, Insert.Contig: %u, "
4382 "Insert.contig_index: %d, Insert.free_records: %d, "
4383 "Insert.tree_depth: %d\n",
4384 insert.ins_appending, insert.ins_contig, insert.ins_contig_index,
Tao Maoc77534f2007-08-28 17:22:33 -07004385 free_records, insert.ins_tree_depth);
Mark Fashehccd979b2005-12-15 14:31:24 -08004386
Tao Maoc77534f2007-08-28 17:22:33 -07004387 if (insert.ins_contig == CONTIG_NONE && free_records == 0) {
Tao Mae7d4cb62008-08-18 17:38:44 +08004388 status = ocfs2_grow_tree(inode, handle, et,
Mark Fasheh328d5752007-06-18 10:48:04 -07004389 &insert.ins_tree_depth, &last_eb_bh,
Mark Fashehc3afcbb2007-05-29 14:28:51 -07004390 meta_ac);
4391 if (status) {
Mark Fashehccd979b2005-12-15 14:31:24 -08004392 mlog_errno(status);
4393 goto bail;
4394 }
Mark Fashehccd979b2005-12-15 14:31:24 -08004395 }
4396
Mark Fashehdcd05382007-01-16 11:32:23 -08004397 /* Finally, we can add clusters. This might rotate the tree for us. */
Tao Mae7d4cb62008-08-18 17:38:44 +08004398 status = ocfs2_do_insert_extent(inode, handle, et, &rec, &insert);
Mark Fashehccd979b2005-12-15 14:31:24 -08004399 if (status < 0)
4400 mlog_errno(status);
Joel Beckerce1d9ea2008-08-20 16:30:07 -07004401 else if (et->et_type == OCFS2_DINODE_EXTENT)
Mark Fasheh83418972007-04-23 18:53:12 -07004402 ocfs2_extent_map_insert_rec(inode, &rec);
Mark Fashehccd979b2005-12-15 14:31:24 -08004403
4404bail:
Mark Fashehccd979b2005-12-15 14:31:24 -08004405 if (last_eb_bh)
4406 brelse(last_eb_bh);
4407
Tao Maf56654c2008-08-18 17:38:48 +08004408 mlog_exit(status);
4409 return status;
4410}
4411
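/*
 * The insert_extent() wrappers below differ only in the extent tree
 * type they pass to ocfs2_get_extent_tree() before calling the common
 * ocfs2_insert_extent().
 */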
4412int ocfs2_dinode_insert_extent(struct ocfs2_super *osb,
4413 handle_t *handle,
4414 struct inode *inode,
4415 struct buffer_head *root_bh,
4416 u32 cpos,
4417 u64 start_blk,
4418 u32 new_clusters,
4419 u8 flags,
4420 struct ocfs2_alloc_context *meta_ac)
4421{
4422 int status;
Joel Beckerdc0ce612008-08-20 16:48:35 -07004423 struct ocfs2_extent_tree et;
Tao Maf56654c2008-08-18 17:38:48 +08004424
Joel Beckerdc0ce612008-08-20 16:48:35 -07004425 ocfs2_get_extent_tree(&et, inode, root_bh, OCFS2_DINODE_EXTENT,
4426 NULL);
Tao Maf56654c2008-08-18 17:38:48 +08004427 status = ocfs2_insert_extent(osb, handle, inode, root_bh,
4428 cpos, start_blk, new_clusters,
Joel Beckerdc0ce612008-08-20 16:48:35 -07004429 flags, meta_ac, &et);
4430 ocfs2_put_extent_tree(&et);
Tao Maf56654c2008-08-18 17:38:48 +08004431
Tao Maf56654c2008-08-18 17:38:48 +08004432 return status;
4433}
4434
4435int ocfs2_xattr_value_insert_extent(struct ocfs2_super *osb,
4436 handle_t *handle,
4437 struct inode *inode,
4438 struct buffer_head *root_bh,
4439 u32 cpos,
4440 u64 start_blk,
4441 u32 new_clusters,
4442 u8 flags,
4443 struct ocfs2_alloc_context *meta_ac,
4444 void *private)
4445{
4446 int status;
Joel Beckerdc0ce612008-08-20 16:48:35 -07004447 struct ocfs2_extent_tree et;
Tao Maf56654c2008-08-18 17:38:48 +08004448
Joel Beckerdc0ce612008-08-20 16:48:35 -07004449 ocfs2_get_extent_tree(&et, inode, root_bh,
4450 OCFS2_XATTR_VALUE_EXTENT, private);
Tao Maf56654c2008-08-18 17:38:48 +08004451 status = ocfs2_insert_extent(osb, handle, inode, root_bh,
4452 cpos, start_blk, new_clusters,
Joel Beckerdc0ce612008-08-20 16:48:35 -07004453 flags, meta_ac, &et);
4454 ocfs2_put_extent_tree(&et);
Tao Maf56654c2008-08-18 17:38:48 +08004455
Mark Fashehccd979b2005-12-15 14:31:24 -08004456 return status;
4457}
4458
Tao Maba492612008-08-18 17:38:49 +08004459int ocfs2_xattr_tree_insert_extent(struct ocfs2_super *osb,
4460 handle_t *handle,
4461 struct inode *inode,
4462 struct buffer_head *root_bh,
4463 u32 cpos,
4464 u64 start_blk,
4465 u32 new_clusters,
4466 u8 flags,
4467 struct ocfs2_alloc_context *meta_ac)
4468{
4469 int status;
Joel Beckerdc0ce612008-08-20 16:48:35 -07004470 struct ocfs2_extent_tree et;
Tao Maba492612008-08-18 17:38:49 +08004471
Joel Beckerdc0ce612008-08-20 16:48:35 -07004472 ocfs2_get_extent_tree(&et, inode, root_bh, OCFS2_XATTR_TREE_EXTENT,
4473 NULL);
Tao Maba492612008-08-18 17:38:49 +08004474 status = ocfs2_insert_extent(osb, handle, inode, root_bh,
4475 cpos, start_blk, new_clusters,
Joel Beckerdc0ce612008-08-20 16:48:35 -07004476 flags, meta_ac, &et);
4477 ocfs2_put_extent_tree(&et);
Tao Maba492612008-08-18 17:38:49 +08004478
Tao Maba492612008-08-18 17:38:49 +08004479 return status;
4480}
4481
Tao Ma0eb8d472008-08-18 17:38:45 +08004482/*
4483 * Allcate and add clusters into the extent b-tree.
4484 * The new clusters(clusters_to_add) will be inserted at logical_offset.
4485 * The extent b-tree's root is root_el and it should be in root_bh, and
4486 * it is not limited to the file storage. Any extent tree can use this
4487 * function if it implements the proper ocfs2_extent_tree.
4488 */
4489int ocfs2_add_clusters_in_btree(struct ocfs2_super *osb,
4490 struct inode *inode,
4491 u32 *logical_offset,
4492 u32 clusters_to_add,
4493 int mark_unwritten,
4494 struct buffer_head *root_bh,
4495 struct ocfs2_extent_list *root_el,
4496 handle_t *handle,
4497 struct ocfs2_alloc_context *data_ac,
4498 struct ocfs2_alloc_context *meta_ac,
4499 enum ocfs2_alloc_restarted *reason_ret,
Tao Maf56654c2008-08-18 17:38:48 +08004500 enum ocfs2_extent_tree_type type,
4501 void *private)
Tao Ma0eb8d472008-08-18 17:38:45 +08004502{
4503 int status = 0;
4504 int free_extents;
4505 enum ocfs2_alloc_restarted reason = RESTART_NONE;
4506 u32 bit_off, num_bits;
4507 u64 block;
4508 u8 flags = 0;
4509
4510 BUG_ON(!clusters_to_add);
4511
4512 if (mark_unwritten)
4513 flags = OCFS2_EXT_UNWRITTEN;
4514
Tao Maf56654c2008-08-18 17:38:48 +08004515 free_extents = ocfs2_num_free_extents(osb, inode, root_bh, type,
4516 private);
Tao Ma0eb8d472008-08-18 17:38:45 +08004517 if (free_extents < 0) {
4518 status = free_extents;
4519 mlog_errno(status);
4520 goto leave;
4521 }
4522
4523 /* there are two cases which could cause us to EAGAIN in the
4524 * we-need-more-metadata case:
4525	 * 1) we haven't reserved *any* metadata
4526 * 2) we are so fragmented, we've needed to add metadata too
4527 * many times. */
4528 if (!free_extents && !meta_ac) {
4529 mlog(0, "we haven't reserved any metadata!\n");
4530 status = -EAGAIN;
4531 reason = RESTART_META;
4532 goto leave;
4533 } else if ((!free_extents)
4534 && (ocfs2_alloc_context_bits_left(meta_ac)
4535 < ocfs2_extend_meta_needed(root_el))) {
4536 mlog(0, "filesystem is really fragmented...\n");
4537 status = -EAGAIN;
4538 reason = RESTART_META;
4539 goto leave;
4540 }
4541
4542 status = __ocfs2_claim_clusters(osb, handle, data_ac, 1,
4543 clusters_to_add, &bit_off, &num_bits);
4544 if (status < 0) {
4545 if (status != -ENOSPC)
4546 mlog_errno(status);
4547 goto leave;
4548 }
4549
4550 BUG_ON(num_bits > clusters_to_add);
4551
4552 /* reserve our write early -- insert_extent may update the inode */
4553 status = ocfs2_journal_access(handle, inode, root_bh,
4554 OCFS2_JOURNAL_ACCESS_WRITE);
4555 if (status < 0) {
4556 mlog_errno(status);
4557 goto leave;
4558 }
4559
4560 block = ocfs2_clusters_to_blocks(osb->sb, bit_off);
4561 mlog(0, "Allocating %u clusters at block %u for inode %llu\n",
4562 num_bits, bit_off, (unsigned long long)OCFS2_I(inode)->ip_blkno);
Tao Maf56654c2008-08-18 17:38:48 +08004563 if (type == OCFS2_DINODE_EXTENT)
4564 status = ocfs2_dinode_insert_extent(osb, handle, inode, root_bh,
4565 *logical_offset, block,
4566 num_bits, flags, meta_ac);
Tao Maba492612008-08-18 17:38:49 +08004567 else if (type == OCFS2_XATTR_TREE_EXTENT)
4568 status = ocfs2_xattr_tree_insert_extent(osb, handle,
4569 inode, root_bh,
4570 *logical_offset,
4571 block, num_bits, flags,
4572 meta_ac);
Tao Maf56654c2008-08-18 17:38:48 +08004573 else
4574 status = ocfs2_xattr_value_insert_extent(osb, handle,
4575 inode, root_bh,
4576 *logical_offset,
4577 block, num_bits, flags,
4578 meta_ac, private);
Tao Ma0eb8d472008-08-18 17:38:45 +08004579 if (status < 0) {
4580 mlog_errno(status);
4581 goto leave;
4582 }
4583
4584 status = ocfs2_journal_dirty(handle, root_bh);
4585 if (status < 0) {
4586 mlog_errno(status);
4587 goto leave;
4588 }
4589
4590 clusters_to_add -= num_bits;
4591 *logical_offset += num_bits;
4592
4593 if (clusters_to_add) {
4594 mlog(0, "need to alloc once more, wanted = %u\n",
4595 clusters_to_add);
4596 status = -EAGAIN;
4597 reason = RESTART_TRANS;
4598 }
4599
4600leave:
4601 mlog_exit(status);
4602 if (reason_ret)
4603 *reason_ret = reason;
4604 return status;
4605}
4606
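/*
 * Build the record describing the right-hand part of a split: it
 * starts at 'cpos' and runs to the end of 'rec', with e_blkno offset
 * to match.
 */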
Mark Fasheh328d5752007-06-18 10:48:04 -07004607static void ocfs2_make_right_split_rec(struct super_block *sb,
4608 struct ocfs2_extent_rec *split_rec,
4609 u32 cpos,
4610 struct ocfs2_extent_rec *rec)
4611{
4612 u32 rec_cpos = le32_to_cpu(rec->e_cpos);
4613 u32 rec_range = rec_cpos + le16_to_cpu(rec->e_leaf_clusters);
4614
4615 memset(split_rec, 0, sizeof(struct ocfs2_extent_rec));
4616
4617 split_rec->e_cpos = cpu_to_le32(cpos);
4618 split_rec->e_leaf_clusters = cpu_to_le16(rec_range - cpos);
4619
4620 split_rec->e_blkno = rec->e_blkno;
4621 le64_add_cpu(&split_rec->e_blkno,
4622 ocfs2_clusters_to_blocks(sb, cpos - rec_cpos));
4623
4624 split_rec->e_flags = rec->e_flags;
4625}
4626
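/*
 * Insert 'orig_split_rec' by splitting the record at 'split_index'. A
 * split in the middle of a record is done as two passes - a right
 * split followed by a left split - and the tree is grown first if the
 * rightmost leaf has no free slots.
 */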
4627static int ocfs2_split_and_insert(struct inode *inode,
4628 handle_t *handle,
4629 struct ocfs2_path *path,
Tao Mae7d4cb62008-08-18 17:38:44 +08004630 struct ocfs2_extent_tree *et,
Mark Fasheh328d5752007-06-18 10:48:04 -07004631 struct buffer_head **last_eb_bh,
4632 int split_index,
4633 struct ocfs2_extent_rec *orig_split_rec,
4634 struct ocfs2_alloc_context *meta_ac)
4635{
4636 int ret = 0, depth;
4637 unsigned int insert_range, rec_range, do_leftright = 0;
4638 struct ocfs2_extent_rec tmprec;
4639 struct ocfs2_extent_list *rightmost_el;
4640 struct ocfs2_extent_rec rec;
4641 struct ocfs2_extent_rec split_rec = *orig_split_rec;
4642 struct ocfs2_insert_type insert;
4643 struct ocfs2_extent_block *eb;
Mark Fasheh328d5752007-06-18 10:48:04 -07004644
4645leftright:
4646 /*
4647 * Store a copy of the record on the stack - it might move
4648 * around as the tree is manipulated below.
4649 */
4650 rec = path_leaf_el(path)->l_recs[split_index];
4651
Joel Beckerce1d9ea2008-08-20 16:30:07 -07004652 rightmost_el = et->et_root_el;
Mark Fasheh328d5752007-06-18 10:48:04 -07004653
4654 depth = le16_to_cpu(rightmost_el->l_tree_depth);
4655 if (depth) {
4656 BUG_ON(!(*last_eb_bh));
4657 eb = (struct ocfs2_extent_block *) (*last_eb_bh)->b_data;
4658 rightmost_el = &eb->h_list;
4659 }
4660
4661 if (le16_to_cpu(rightmost_el->l_next_free_rec) ==
4662 le16_to_cpu(rightmost_el->l_count)) {
Tao Mae7d4cb62008-08-18 17:38:44 +08004663 ret = ocfs2_grow_tree(inode, handle, et,
4664 &depth, last_eb_bh, meta_ac);
Mark Fasheh328d5752007-06-18 10:48:04 -07004665 if (ret) {
4666 mlog_errno(ret);
4667 goto out;
4668 }
Mark Fasheh328d5752007-06-18 10:48:04 -07004669 }
4670
4671 memset(&insert, 0, sizeof(struct ocfs2_insert_type));
4672 insert.ins_appending = APPEND_NONE;
4673 insert.ins_contig = CONTIG_NONE;
Mark Fasheh328d5752007-06-18 10:48:04 -07004674 insert.ins_tree_depth = depth;
4675
4676 insert_range = le32_to_cpu(split_rec.e_cpos) +
4677 le16_to_cpu(split_rec.e_leaf_clusters);
4678 rec_range = le32_to_cpu(rec.e_cpos) +
4679 le16_to_cpu(rec.e_leaf_clusters);
4680
4681 if (split_rec.e_cpos == rec.e_cpos) {
4682 insert.ins_split = SPLIT_LEFT;
4683 } else if (insert_range == rec_range) {
4684 insert.ins_split = SPLIT_RIGHT;
4685 } else {
4686 /*
4687 * Left/right split. We fake this as a right split
4688 * first and then make a second pass as a left split.
4689 */
4690 insert.ins_split = SPLIT_RIGHT;
4691
4692 ocfs2_make_right_split_rec(inode->i_sb, &tmprec, insert_range,
4693 &rec);
4694
4695 split_rec = tmprec;
4696
4697 BUG_ON(do_leftright);
4698 do_leftright = 1;
4699 }
4700
Tao Mae7d4cb62008-08-18 17:38:44 +08004701 ret = ocfs2_do_insert_extent(inode, handle, et, &split_rec, &insert);
Mark Fasheh328d5752007-06-18 10:48:04 -07004702 if (ret) {
4703 mlog_errno(ret);
4704 goto out;
4705 }
4706
4707 if (do_leftright == 1) {
4708 u32 cpos;
4709 struct ocfs2_extent_list *el;
4710
4711 do_leftright++;
4712 split_rec = *orig_split_rec;
4713
4714 ocfs2_reinit_path(path, 1);
4715
4716 cpos = le32_to_cpu(split_rec.e_cpos);
4717 ret = ocfs2_find_path(inode, path, cpos);
4718 if (ret) {
4719 mlog_errno(ret);
4720 goto out;
4721 }
4722
4723 el = path_leaf_el(path);
4724 split_index = ocfs2_search_extent_list(el, cpos);
4725 goto leftright;
4726 }
4727out:
4728
4729 return ret;
4730}
4731
4732/*
4733 * Mark part or all of the extent record at split_index in the leaf
4734 * pointed to by path as written. This removes the unwritten
4735 * extent flag.
4736 *
4737 * Care is taken to handle contiguousness so as to not grow the tree.
4738 *
4739 * meta_ac is not strictly necessary - we only truly need it if growth
4740 * of the tree is required. All other cases will degrade into a less
4741 * optimal tree layout.
4742 *
Tao Mae7d4cb62008-08-18 17:38:44 +08004743 * last_eb_bh should be the rightmost leaf block for any extent
4744 * btree. Since a split may grow the tree or a merge might shrink it,
4745 * the caller cannot trust the contents of that buffer after this call.
Mark Fasheh328d5752007-06-18 10:48:04 -07004746 *
4747 * This code is optimized for readability - several passes might be
4748 * made over certain portions of the tree. All of those blocks will
4749 * have been brought into cache (and pinned via the journal), so the
4750 * extra overhead is not expressed in terms of disk reads.
4751 */
4752static int __ocfs2_mark_extent_written(struct inode *inode,
Tao Mae7d4cb62008-08-18 17:38:44 +08004753 struct ocfs2_extent_tree *et,
Mark Fasheh328d5752007-06-18 10:48:04 -07004754 handle_t *handle,
4755 struct ocfs2_path *path,
4756 int split_index,
4757 struct ocfs2_extent_rec *split_rec,
4758 struct ocfs2_alloc_context *meta_ac,
4759 struct ocfs2_cached_dealloc_ctxt *dealloc)
4760{
4761 int ret = 0;
4762 struct ocfs2_extent_list *el = path_leaf_el(path);
Mark Fashehe8aed342007-12-03 16:43:01 -08004763 struct buffer_head *last_eb_bh = NULL;
Mark Fasheh328d5752007-06-18 10:48:04 -07004764 struct ocfs2_extent_rec *rec = &el->l_recs[split_index];
4765 struct ocfs2_merge_ctxt ctxt;
4766 struct ocfs2_extent_list *rightmost_el;
4767
Roel Kluin3cf0c502007-10-27 00:20:36 +02004768 if (!(rec->e_flags & OCFS2_EXT_UNWRITTEN)) {
Mark Fasheh328d5752007-06-18 10:48:04 -07004769 ret = -EIO;
4770 mlog_errno(ret);
4771 goto out;
4772 }
4773
4774 if (le32_to_cpu(rec->e_cpos) > le32_to_cpu(split_rec->e_cpos) ||
4775 ((le32_to_cpu(rec->e_cpos) + le16_to_cpu(rec->e_leaf_clusters)) <
4776 (le32_to_cpu(split_rec->e_cpos) + le16_to_cpu(split_rec->e_leaf_clusters)))) {
4777 ret = -EIO;
4778 mlog_errno(ret);
4779 goto out;
4780 }
4781
Tao Maad5a4d72008-01-30 14:21:32 +08004782 ctxt.c_contig_type = ocfs2_figure_merge_contig_type(inode, path, el,
Mark Fasheh328d5752007-06-18 10:48:04 -07004783 split_index,
4784 split_rec);
4785
4786 /*
4787 * The core merge / split code wants to know how much room is
4788	 * left in this inode's allocation tree, so we pass the
4789 * rightmost extent list.
4790 */
4791 if (path->p_tree_depth) {
4792 struct ocfs2_extent_block *eb;
Mark Fasheh328d5752007-06-18 10:48:04 -07004793
4794 ret = ocfs2_read_block(OCFS2_SB(inode->i_sb),
Joel Becker35dc0aa2008-08-20 16:25:06 -07004795 ocfs2_et_get_last_eb_blk(et),
Mark Fasheh328d5752007-06-18 10:48:04 -07004796 &last_eb_bh, OCFS2_BH_CACHED, inode);
4797 if (ret) {
4798 mlog_exit(ret);
4799 goto out;
4800 }
4801
4802 eb = (struct ocfs2_extent_block *) last_eb_bh->b_data;
4803 if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) {
4804 OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb);
4805 ret = -EROFS;
4806 goto out;
4807 }
4808
4809 rightmost_el = &eb->h_list;
4810 } else
4811 rightmost_el = path_root_el(path);
4812
Mark Fasheh328d5752007-06-18 10:48:04 -07004813 if (rec->e_cpos == split_rec->e_cpos &&
4814 rec->e_leaf_clusters == split_rec->e_leaf_clusters)
4815 ctxt.c_split_covers_rec = 1;
4816 else
4817 ctxt.c_split_covers_rec = 0;
4818
4819 ctxt.c_has_empty_extent = ocfs2_is_empty_extent(&el->l_recs[0]);
4820
Mark Fasheh015452b2007-09-12 10:21:22 -07004821 mlog(0, "index: %d, contig: %u, has_empty: %u, split_covers: %u\n",
4822 split_index, ctxt.c_contig_type, ctxt.c_has_empty_extent,
4823 ctxt.c_split_covers_rec);
Mark Fasheh328d5752007-06-18 10:48:04 -07004824
4825 if (ctxt.c_contig_type == CONTIG_NONE) {
4826 if (ctxt.c_split_covers_rec)
4827 el->l_recs[split_index] = *split_rec;
4828 else
Tao Mae7d4cb62008-08-18 17:38:44 +08004829 ret = ocfs2_split_and_insert(inode, handle, path, et,
Mark Fasheh328d5752007-06-18 10:48:04 -07004830 &last_eb_bh, split_index,
4831 split_rec, meta_ac);
4832 if (ret)
4833 mlog_errno(ret);
4834 } else {
4835 ret = ocfs2_try_to_merge_extent(inode, handle, path,
4836 split_index, split_rec,
Tao Mae7d4cb62008-08-18 17:38:44 +08004837 dealloc, &ctxt, et);
Mark Fasheh328d5752007-06-18 10:48:04 -07004838 if (ret)
4839 mlog_errno(ret);
4840 }
4841
Mark Fasheh328d5752007-06-18 10:48:04 -07004842out:
4843 brelse(last_eb_bh);
4844 return ret;
4845}
4846
4847/*
4848 * Mark the already-existing extent at cpos as written for len clusters.
4849 *
4850 * If the existing extent is larger than the request, initiate a
4851 * split. An attempt will be made at merging with adjacent extents.
4852 *
4853 * The caller is responsible for passing down meta_ac if we'll need it.
4854 */
Tao Mae7d4cb62008-08-18 17:38:44 +08004855int ocfs2_mark_extent_written(struct inode *inode, struct buffer_head *root_bh,
Mark Fasheh328d5752007-06-18 10:48:04 -07004856 handle_t *handle, u32 cpos, u32 len, u32 phys,
4857 struct ocfs2_alloc_context *meta_ac,
Tao Mae7d4cb62008-08-18 17:38:44 +08004858 struct ocfs2_cached_dealloc_ctxt *dealloc,
Tao Maf56654c2008-08-18 17:38:48 +08004859 enum ocfs2_extent_tree_type et_type,
4860 void *private)
Mark Fasheh328d5752007-06-18 10:48:04 -07004861{
4862 int ret, index;
4863 u64 start_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys);
4864 struct ocfs2_extent_rec split_rec;
4865 struct ocfs2_path *left_path = NULL;
4866 struct ocfs2_extent_list *el;
Joel Beckerdc0ce612008-08-20 16:48:35 -07004867 struct ocfs2_extent_tree et;
Mark Fasheh328d5752007-06-18 10:48:04 -07004868
4869 mlog(0, "Inode %lu cpos %u, len %u, phys %u (%llu)\n",
4870 inode->i_ino, cpos, len, phys, (unsigned long long)start_blkno);
4871
Joel Beckerdc0ce612008-08-20 16:48:35 -07004872 ocfs2_get_extent_tree(&et, inode, root_bh, et_type, private);
4873
Mark Fasheh328d5752007-06-18 10:48:04 -07004874 if (!ocfs2_writes_unwritten_extents(OCFS2_SB(inode->i_sb))) {
4875 ocfs2_error(inode->i_sb, "Inode %llu has unwritten extents "
4876 "that are being written to, but the feature bit "
4877 "is not set in the super block.",
4878 (unsigned long long)OCFS2_I(inode)->ip_blkno);
4879 ret = -EROFS;
4880 goto out;
4881 }
4882
4883 /*
4884 * XXX: This should be fixed up so that we just re-insert the
4885 * next extent records.
4886 */
Tao Mae7d4cb62008-08-18 17:38:44 +08004887 if (et_type == OCFS2_DINODE_EXTENT)
4888 ocfs2_extent_map_trunc(inode, 0);
Mark Fasheh328d5752007-06-18 10:48:04 -07004889
Joel Beckerdc0ce612008-08-20 16:48:35 -07004890 left_path = ocfs2_new_path(et.et_root_bh, et.et_root_el);
Mark Fasheh328d5752007-06-18 10:48:04 -07004891 if (!left_path) {
4892 ret = -ENOMEM;
4893 mlog_errno(ret);
4894 goto out;
4895 }
4896
4897 ret = ocfs2_find_path(inode, left_path, cpos);
4898 if (ret) {
4899 mlog_errno(ret);
4900 goto out;
4901 }
4902 el = path_leaf_el(left_path);
4903
4904 index = ocfs2_search_extent_list(el, cpos);
4905 if (index == -1 || index >= le16_to_cpu(el->l_next_free_rec)) {
4906 ocfs2_error(inode->i_sb,
4907 "Inode %llu has an extent at cpos %u which can no "
4908 "longer be found.\n",
4909 (unsigned long long)OCFS2_I(inode)->ip_blkno, cpos);
4910 ret = -EROFS;
4911 goto out;
4912 }
4913
4914 memset(&split_rec, 0, sizeof(struct ocfs2_extent_rec));
4915 split_rec.e_cpos = cpu_to_le32(cpos);
4916 split_rec.e_leaf_clusters = cpu_to_le16(len);
4917 split_rec.e_blkno = cpu_to_le64(start_blkno);
4918 split_rec.e_flags = path_leaf_el(left_path)->l_recs[index].e_flags;
4919 split_rec.e_flags &= ~OCFS2_EXT_UNWRITTEN;
4920
Joel Beckerdc0ce612008-08-20 16:48:35 -07004921 ret = __ocfs2_mark_extent_written(inode, &et, handle, left_path,
Tao Mae7d4cb62008-08-18 17:38:44 +08004922 index, &split_rec, meta_ac,
4923 dealloc);
Mark Fasheh328d5752007-06-18 10:48:04 -07004924 if (ret)
4925 mlog_errno(ret);
4926
4927out:
4928 ocfs2_free_path(left_path);
Joel Beckerdc0ce612008-08-20 16:48:35 -07004929 ocfs2_put_extent_tree(&et);
Mark Fasheh328d5752007-06-18 10:48:04 -07004930 return ret;
4931}
4932
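/*
 * Split the record at 'index' so that everything from 'new_range'
 * onward becomes its own record. The transaction is extended to cover
 * the extra blocks, and the tree is grown beforehand if the rightmost
 * leaf is full.
 */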
Tao Mae7d4cb62008-08-18 17:38:44 +08004933static int ocfs2_split_tree(struct inode *inode, struct ocfs2_extent_tree *et,
Mark Fashehd0c7d702007-07-03 13:27:22 -07004934 handle_t *handle, struct ocfs2_path *path,
4935 int index, u32 new_range,
4936 struct ocfs2_alloc_context *meta_ac)
4937{
4938 int ret, depth, credits = handle->h_buffer_credits;
Mark Fashehd0c7d702007-07-03 13:27:22 -07004939 struct buffer_head *last_eb_bh = NULL;
4940 struct ocfs2_extent_block *eb;
4941 struct ocfs2_extent_list *rightmost_el, *el;
4942 struct ocfs2_extent_rec split_rec;
4943 struct ocfs2_extent_rec *rec;
4944 struct ocfs2_insert_type insert;
4945
4946 /*
4947 * Setup the record to split before we grow the tree.
4948 */
4949 el = path_leaf_el(path);
4950 rec = &el->l_recs[index];
4951 ocfs2_make_right_split_rec(inode->i_sb, &split_rec, new_range, rec);
4952
4953 depth = path->p_tree_depth;
4954 if (depth > 0) {
4955 ret = ocfs2_read_block(OCFS2_SB(inode->i_sb),
Joel Becker35dc0aa2008-08-20 16:25:06 -07004956 ocfs2_et_get_last_eb_blk(et),
Mark Fashehd0c7d702007-07-03 13:27:22 -07004957 &last_eb_bh, OCFS2_BH_CACHED, inode);
4958 if (ret < 0) {
4959 mlog_errno(ret);
4960 goto out;
4961 }
4962
4963 eb = (struct ocfs2_extent_block *) last_eb_bh->b_data;
4964 rightmost_el = &eb->h_list;
4965 } else
4966 rightmost_el = path_leaf_el(path);
4967
Tao Ma811f9332008-08-18 17:38:43 +08004968 credits += path->p_tree_depth +
Joel Beckerce1d9ea2008-08-20 16:30:07 -07004969 ocfs2_extend_meta_needed(et->et_root_el);
Mark Fashehd0c7d702007-07-03 13:27:22 -07004970 ret = ocfs2_extend_trans(handle, credits);
4971 if (ret) {
4972 mlog_errno(ret);
4973 goto out;
4974 }
4975
4976 if (le16_to_cpu(rightmost_el->l_next_free_rec) ==
4977 le16_to_cpu(rightmost_el->l_count)) {
Tao Mae7d4cb62008-08-18 17:38:44 +08004978 ret = ocfs2_grow_tree(inode, handle, et, &depth, &last_eb_bh,
Mark Fashehd0c7d702007-07-03 13:27:22 -07004979 meta_ac);
4980 if (ret) {
4981 mlog_errno(ret);
4982 goto out;
4983 }
Mark Fashehd0c7d702007-07-03 13:27:22 -07004984 }
4985
4986 memset(&insert, 0, sizeof(struct ocfs2_insert_type));
4987 insert.ins_appending = APPEND_NONE;
4988 insert.ins_contig = CONTIG_NONE;
4989 insert.ins_split = SPLIT_RIGHT;
Mark Fashehd0c7d702007-07-03 13:27:22 -07004990 insert.ins_tree_depth = depth;
4991
Tao Mae7d4cb62008-08-18 17:38:44 +08004992 ret = ocfs2_do_insert_extent(inode, handle, et, &split_rec, &insert);
Mark Fashehd0c7d702007-07-03 13:27:22 -07004993 if (ret)
4994 mlog_errno(ret);
4995
4996out:
4997 brelse(last_eb_bh);
4998 return ret;
4999}
5000
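/*
 * Remove 'len' clusters starting at 'cpos' from the record at 'index'.
 * Handles whole-record removal (the resulting hole is filled by a left
 * rotation), truncation of either edge of the record, and the extra
 * subtree/rightmost-edge updates that interior paths require.
 */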
5001static int ocfs2_truncate_rec(struct inode *inode, handle_t *handle,
5002 struct ocfs2_path *path, int index,
5003 struct ocfs2_cached_dealloc_ctxt *dealloc,
Tao Mae7d4cb62008-08-18 17:38:44 +08005004 u32 cpos, u32 len,
5005 struct ocfs2_extent_tree *et)
Mark Fashehd0c7d702007-07-03 13:27:22 -07005006{
5007 int ret;
5008 u32 left_cpos, rec_range, trunc_range;
5009 int wants_rotate = 0, is_rightmost_tree_rec = 0;
5010 struct super_block *sb = inode->i_sb;
5011 struct ocfs2_path *left_path = NULL;
5012 struct ocfs2_extent_list *el = path_leaf_el(path);
5013 struct ocfs2_extent_rec *rec;
5014 struct ocfs2_extent_block *eb;
5015
5016 if (ocfs2_is_empty_extent(&el->l_recs[0]) && index > 0) {
Tao Mae7d4cb62008-08-18 17:38:44 +08005017 ret = ocfs2_rotate_tree_left(inode, handle, path, dealloc, et);
Mark Fashehd0c7d702007-07-03 13:27:22 -07005018 if (ret) {
5019 mlog_errno(ret);
5020 goto out;
5021 }
5022
5023 index--;
5024 }
5025
5026 if (index == (le16_to_cpu(el->l_next_free_rec) - 1) &&
5027 path->p_tree_depth) {
5028 /*
5029 * Check whether this is the rightmost tree record. If
5030 * we remove all of this record or part of its right
5031 * edge then an update of the record lengths above it
5032 * will be required.
5033 */
5034 eb = (struct ocfs2_extent_block *)path_leaf_bh(path)->b_data;
5035 if (eb->h_next_leaf_blk == 0)
5036 is_rightmost_tree_rec = 1;
5037 }
5038
5039 rec = &el->l_recs[index];
5040 if (index == 0 && path->p_tree_depth &&
5041 le32_to_cpu(rec->e_cpos) == cpos) {
5042 /*
5043 * Changing the leftmost offset (via partial or whole
5044 * record truncate) of an interior (or rightmost) path
5045 * means we have to update the subtree that is formed
5046 * by this leaf and the one to it's left.
	5047		 * by this leaf and the one to its left.
5048 * There are two cases we can skip:
5049 * 1) Path is the leftmost one in our inode tree.
5050 * 2) The leaf is rightmost and will be empty after
5051 * we remove the extent record - the rotate code
5052 * knows how to update the newly formed edge.
5053 */
5054
5055 ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb, path,
5056 &left_cpos);
5057 if (ret) {
5058 mlog_errno(ret);
5059 goto out;
5060 }
5061
5062 if (left_cpos && le16_to_cpu(el->l_next_free_rec) > 1) {
5063 left_path = ocfs2_new_path(path_root_bh(path),
5064 path_root_el(path));
5065 if (!left_path) {
5066 ret = -ENOMEM;
5067 mlog_errno(ret);
5068 goto out;
5069 }
5070
5071 ret = ocfs2_find_path(inode, left_path, left_cpos);
5072 if (ret) {
5073 mlog_errno(ret);
5074 goto out;
5075 }
5076 }
5077 }
5078
5079 ret = ocfs2_extend_rotate_transaction(handle, 0,
5080 handle->h_buffer_credits,
5081 path);
5082 if (ret) {
5083 mlog_errno(ret);
5084 goto out;
5085 }
5086
5087 ret = ocfs2_journal_access_path(inode, handle, path);
5088 if (ret) {
5089 mlog_errno(ret);
5090 goto out;
5091 }
5092
5093 ret = ocfs2_journal_access_path(inode, handle, left_path);
5094 if (ret) {
5095 mlog_errno(ret);
5096 goto out;
5097 }
5098
5099 rec_range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);
5100 trunc_range = cpos + len;
5101
5102 if (le32_to_cpu(rec->e_cpos) == cpos && rec_range == trunc_range) {
5103 int next_free;
5104
5105 memset(rec, 0, sizeof(*rec));
5106 ocfs2_cleanup_merge(el, index);
5107 wants_rotate = 1;
5108
5109 next_free = le16_to_cpu(el->l_next_free_rec);
5110 if (is_rightmost_tree_rec && next_free > 1) {
5111 /*
5112 * We skip the edge update if this path will
5113 * be deleted by the rotate code.
5114 */
5115 rec = &el->l_recs[next_free - 1];
5116 ocfs2_adjust_rightmost_records(inode, handle, path,
5117 rec);
5118 }
5119 } else if (le32_to_cpu(rec->e_cpos) == cpos) {
5120 /* Remove leftmost portion of the record. */
5121 le32_add_cpu(&rec->e_cpos, len);
5122 le64_add_cpu(&rec->e_blkno, ocfs2_clusters_to_blocks(sb, len));
5123 le16_add_cpu(&rec->e_leaf_clusters, -len);
5124 } else if (rec_range == trunc_range) {
5125 /* Remove rightmost portion of the record */
5126 le16_add_cpu(&rec->e_leaf_clusters, -len);
5127 if (is_rightmost_tree_rec)
5128 ocfs2_adjust_rightmost_records(inode, handle, path, rec);
5129 } else {
5130 /* Caller should have trapped this. */
5131 mlog(ML_ERROR, "Inode %llu: Invalid record truncate: (%u, %u) "
5132 "(%u, %u)\n", (unsigned long long)OCFS2_I(inode)->ip_blkno,
5133 le32_to_cpu(rec->e_cpos),
5134 le16_to_cpu(rec->e_leaf_clusters), cpos, len);
5135 BUG();
5136 }
5137
5138 if (left_path) {
5139 int subtree_index;
5140
5141 subtree_index = ocfs2_find_subtree_root(inode, left_path, path);
5142 ocfs2_complete_edge_insert(inode, handle, left_path, path,
5143 subtree_index);
5144 }
5145
5146 ocfs2_journal_dirty(handle, path_leaf_bh(path));
5147
Tao Mae7d4cb62008-08-18 17:38:44 +08005148 ret = ocfs2_rotate_tree_left(inode, handle, path, dealloc, et);
Mark Fashehd0c7d702007-07-03 13:27:22 -07005149 if (ret) {
5150 mlog_errno(ret);
5151 goto out;
5152 }
5153
5154out:
5155 ocfs2_free_path(left_path);
5156 return ret;
5157}
5158
Tao Mae7d4cb62008-08-18 17:38:44 +08005159int ocfs2_remove_extent(struct inode *inode, struct buffer_head *root_bh,
Mark Fasheh063c4562007-07-03 13:34:11 -07005160 u32 cpos, u32 len, handle_t *handle,
5161 struct ocfs2_alloc_context *meta_ac,
Tao Mae7d4cb62008-08-18 17:38:44 +08005162 struct ocfs2_cached_dealloc_ctxt *dealloc,
Tao Maf56654c2008-08-18 17:38:48 +08005163 enum ocfs2_extent_tree_type et_type,
5164 void *private)
Mark Fashehd0c7d702007-07-03 13:27:22 -07005165{
5166 int ret, index;
5167 u32 rec_range, trunc_range;
5168 struct ocfs2_extent_rec *rec;
5169 struct ocfs2_extent_list *el;
Tao Mae7d4cb62008-08-18 17:38:44 +08005170 struct ocfs2_path *path = NULL;
Joel Beckerdc0ce612008-08-20 16:48:35 -07005171 struct ocfs2_extent_tree et;
Tao Mae7d4cb62008-08-18 17:38:44 +08005172
Joel Beckerdc0ce612008-08-20 16:48:35 -07005173 ocfs2_get_extent_tree(&et, inode, root_bh, et_type, private);
Mark Fashehd0c7d702007-07-03 13:27:22 -07005174
5175 ocfs2_extent_map_trunc(inode, 0);
5176
Joel Beckerdc0ce612008-08-20 16:48:35 -07005177 path = ocfs2_new_path(et.et_root_bh, et.et_root_el);
Mark Fashehd0c7d702007-07-03 13:27:22 -07005178 if (!path) {
5179 ret = -ENOMEM;
5180 mlog_errno(ret);
5181 goto out;
5182 }
5183
5184 ret = ocfs2_find_path(inode, path, cpos);
5185 if (ret) {
5186 mlog_errno(ret);
5187 goto out;
5188 }
5189
5190 el = path_leaf_el(path);
5191 index = ocfs2_search_extent_list(el, cpos);
5192 if (index == -1 || index >= le16_to_cpu(el->l_next_free_rec)) {
5193 ocfs2_error(inode->i_sb,
5194 "Inode %llu has an extent at cpos %u which can no "
5195 "longer be found.\n",
5196 (unsigned long long)OCFS2_I(inode)->ip_blkno, cpos);
5197 ret = -EROFS;
5198 goto out;
5199 }
5200
5201 /*
5202 * We have 3 cases of extent removal:
5203 * 1) Range covers the entire extent rec
5204 * 2) Range begins or ends on one edge of the extent rec
5205 * 3) Range is in the middle of the extent rec (no shared edges)
5206 *
5207 * For case 1 we remove the extent rec and left rotate to
5208 * fill the hole.
5209 *
5210 * For case 2 we just shrink the existing extent rec, with a
5211 * tree update if the shrinking edge is also the edge of an
5212 * extent block.
5213 *
5214 * For case 3 we do a right split to turn the extent rec into
5215 * something case 2 can handle.
5216 */
5217 rec = &el->l_recs[index];
5218 rec_range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);
5219 trunc_range = cpos + len;
5220
5221 BUG_ON(cpos < le32_to_cpu(rec->e_cpos) || trunc_range > rec_range);
5222
5223 mlog(0, "Inode %llu, remove (cpos %u, len %u). Existing index %d "
5224 "(cpos %u, len %u)\n",
5225 (unsigned long long)OCFS2_I(inode)->ip_blkno, cpos, len, index,
5226 le32_to_cpu(rec->e_cpos), ocfs2_rec_clusters(el, rec));
5227
5228 if (le32_to_cpu(rec->e_cpos) == cpos || rec_range == trunc_range) {
5229 ret = ocfs2_truncate_rec(inode, handle, path, index, dealloc,
Joel Beckerdc0ce612008-08-20 16:48:35 -07005230 cpos, len, &et);
Mark Fashehd0c7d702007-07-03 13:27:22 -07005231 if (ret) {
5232 mlog_errno(ret);
5233 goto out;
5234 }
5235 } else {
Joel Beckerdc0ce612008-08-20 16:48:35 -07005236 ret = ocfs2_split_tree(inode, &et, handle, path, index,
Mark Fashehd0c7d702007-07-03 13:27:22 -07005237 trunc_range, meta_ac);
5238 if (ret) {
5239 mlog_errno(ret);
5240 goto out;
5241 }
5242
5243 /*
5244 * The split could have manipulated the tree enough to
5245 * move the record location, so we have to look for it again.
5246 */
5247 ocfs2_reinit_path(path, 1);
5248
5249 ret = ocfs2_find_path(inode, path, cpos);
5250 if (ret) {
5251 mlog_errno(ret);
5252 goto out;
5253 }
5254
5255 el = path_leaf_el(path);
5256 index = ocfs2_search_extent_list(el, cpos);
5257 if (index == -1 || index >= le16_to_cpu(el->l_next_free_rec)) {
5258 ocfs2_error(inode->i_sb,
5259 "Inode %llu: split at cpos %u lost record.",
5260 (unsigned long long)OCFS2_I(inode)->ip_blkno,
5261 cpos);
5262 ret = -EROFS;
5263 goto out;
5264 }
5265
5266 /*
5267 * Double check our values here. If anything is fishy,
5268 * it's easier to catch it at the top level.
5269 */
5270 rec = &el->l_recs[index];
5271 rec_range = le32_to_cpu(rec->e_cpos) +
5272 ocfs2_rec_clusters(el, rec);
5273 if (rec_range != trunc_range) {
5274 ocfs2_error(inode->i_sb,
	5275				    "Inode %llu: error after split at cpos %u "
5276 "trunc len %u, existing record is (%u,%u)",
5277 (unsigned long long)OCFS2_I(inode)->ip_blkno,
5278 cpos, len, le32_to_cpu(rec->e_cpos),
5279 ocfs2_rec_clusters(el, rec));
5280 ret = -EROFS;
5281 goto out;
5282 }
5283
5284 ret = ocfs2_truncate_rec(inode, handle, path, index, dealloc,
Joel Beckerdc0ce612008-08-20 16:48:35 -07005285 cpos, len, &et);
Mark Fashehd0c7d702007-07-03 13:27:22 -07005286 if (ret) {
5287 mlog_errno(ret);
5288 goto out;
5289 }
5290 }
5291
5292out:
5293 ocfs2_free_path(path);
Joel Beckerdc0ce612008-08-20 16:48:35 -07005294 ocfs2_put_extent_tree(&et);
Mark Fashehd0c7d702007-07-03 13:27:22 -07005295 return ret;
5296}
5297
Mark Fasheh063c4562007-07-03 13:34:11 -07005298int ocfs2_truncate_log_needs_flush(struct ocfs2_super *osb)
Mark Fashehccd979b2005-12-15 14:31:24 -08005299{
5300 struct buffer_head *tl_bh = osb->osb_tl_bh;
5301 struct ocfs2_dinode *di;
5302 struct ocfs2_truncate_log *tl;
5303
5304 di = (struct ocfs2_dinode *) tl_bh->b_data;
5305 tl = &di->id2.i_dealloc;
5306
5307 mlog_bug_on_msg(le16_to_cpu(tl->tl_used) > le16_to_cpu(tl->tl_count),
5308 "slot %d, invalid truncate log parameters: used = "
5309 "%u, count = %u\n", osb->slot_num,
5310 le16_to_cpu(tl->tl_used), le16_to_cpu(tl->tl_count));
5311 return le16_to_cpu(tl->tl_used) == le16_to_cpu(tl->tl_count);
5312}
5313
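/*
 * Returns nonzero if 'new_start' begins exactly where the last truncate
 * log record ends, in which case the two ranges can be coalesced into a
 * single record.
 */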
5314static int ocfs2_truncate_log_can_coalesce(struct ocfs2_truncate_log *tl,
5315 unsigned int new_start)
5316{
5317 unsigned int tail_index;
5318 unsigned int current_tail;
5319
5320 /* No records, nothing to coalesce */
5321 if (!le16_to_cpu(tl->tl_used))
5322 return 0;
5323
5324 tail_index = le16_to_cpu(tl->tl_used) - 1;
5325 current_tail = le32_to_cpu(tl->tl_recs[tail_index].t_start);
5326 current_tail += le32_to_cpu(tl->tl_recs[tail_index].t_clusters);
5327
5328 return current_tail == new_start;
5329}
5330
Mark Fasheh063c4562007-07-03 13:34:11 -07005331int ocfs2_truncate_log_append(struct ocfs2_super *osb,
5332 handle_t *handle,
5333 u64 start_blk,
5334 unsigned int num_clusters)
Mark Fashehccd979b2005-12-15 14:31:24 -08005335{
5336 int status, index;
5337 unsigned int start_cluster, tl_count;
5338 struct inode *tl_inode = osb->osb_tl_inode;
5339 struct buffer_head *tl_bh = osb->osb_tl_bh;
5340 struct ocfs2_dinode *di;
5341 struct ocfs2_truncate_log *tl;
5342
Mark Fashehb0697052006-03-03 10:24:33 -08005343 mlog_entry("start_blk = %llu, num_clusters = %u\n",
5344 (unsigned long long)start_blk, num_clusters);
Mark Fashehccd979b2005-12-15 14:31:24 -08005345
Jes Sorensen1b1dcc12006-01-09 15:59:24 -08005346 BUG_ON(mutex_trylock(&tl_inode->i_mutex));
Mark Fashehccd979b2005-12-15 14:31:24 -08005347
5348 start_cluster = ocfs2_blocks_to_clusters(osb->sb, start_blk);
5349
5350 di = (struct ocfs2_dinode *) tl_bh->b_data;
5351 tl = &di->id2.i_dealloc;
5352 if (!OCFS2_IS_VALID_DINODE(di)) {
5353 OCFS2_RO_ON_INVALID_DINODE(osb->sb, di);
5354 status = -EIO;
5355 goto bail;
5356 }
5357
5358 tl_count = le16_to_cpu(tl->tl_count);
5359 mlog_bug_on_msg(tl_count > ocfs2_truncate_recs_per_inode(osb->sb) ||
5360 tl_count == 0,
Mark Fashehb0697052006-03-03 10:24:33 -08005361 "Truncate record count on #%llu invalid "
5362 "wanted %u, actual %u\n",
5363 (unsigned long long)OCFS2_I(tl_inode)->ip_blkno,
Mark Fashehccd979b2005-12-15 14:31:24 -08005364 ocfs2_truncate_recs_per_inode(osb->sb),
5365 le16_to_cpu(tl->tl_count));
5366
5367 /* Caller should have known to flush before calling us. */
5368 index = le16_to_cpu(tl->tl_used);
5369 if (index >= tl_count) {
5370 status = -ENOSPC;
5371 mlog_errno(status);
5372 goto bail;
5373 }
5374
5375 status = ocfs2_journal_access(handle, tl_inode, tl_bh,
5376 OCFS2_JOURNAL_ACCESS_WRITE);
5377 if (status < 0) {
5378 mlog_errno(status);
5379 goto bail;
5380 }
5381
5382 mlog(0, "Log truncate of %u clusters starting at cluster %u to "
Mark Fashehb0697052006-03-03 10:24:33 -08005383 "%llu (index = %d)\n", num_clusters, start_cluster,
5384 (unsigned long long)OCFS2_I(tl_inode)->ip_blkno, index);
Mark Fashehccd979b2005-12-15 14:31:24 -08005385
5386 if (ocfs2_truncate_log_can_coalesce(tl, start_cluster)) {
5387 /*
5388 * Move index back to the record we are coalescing with.
	5389		 * ocfs2_truncate_log_can_coalesce() guarantees index is nonzero here
5390 */
5391 index--;
5392
5393 num_clusters += le32_to_cpu(tl->tl_recs[index].t_clusters);
5394 mlog(0, "Coalesce with index %u (start = %u, clusters = %u)\n",
5395 index, le32_to_cpu(tl->tl_recs[index].t_start),
5396 num_clusters);
5397 } else {
5398 tl->tl_recs[index].t_start = cpu_to_le32(start_cluster);
5399 tl->tl_used = cpu_to_le16(index + 1);
5400 }
5401 tl->tl_recs[index].t_clusters = cpu_to_le32(num_clusters);
5402
5403 status = ocfs2_journal_dirty(handle, tl_bh);
5404 if (status < 0) {
5405 mlog_errno(status);
5406 goto bail;
5407 }
5408
5409bail:
5410 mlog_exit(status);
5411 return status;
5412}
5413
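/*
 * Walk the truncate log from its last record backwards, freeing each
 * logged range back to the cluster allocator passed in and shrinking
 * tl_used as we go. The transaction is extended before each record is
 * replayed.
 */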
5414static int ocfs2_replay_truncate_records(struct ocfs2_super *osb,
Mark Fasheh1fabe142006-10-09 18:11:45 -07005415 handle_t *handle,
Mark Fashehccd979b2005-12-15 14:31:24 -08005416 struct inode *data_alloc_inode,
5417 struct buffer_head *data_alloc_bh)
5418{
5419 int status = 0;
5420 int i;
5421 unsigned int num_clusters;
5422 u64 start_blk;
5423 struct ocfs2_truncate_rec rec;
5424 struct ocfs2_dinode *di;
5425 struct ocfs2_truncate_log *tl;
5426 struct inode *tl_inode = osb->osb_tl_inode;
5427 struct buffer_head *tl_bh = osb->osb_tl_bh;
5428
5429 mlog_entry_void();
5430
5431 di = (struct ocfs2_dinode *) tl_bh->b_data;
5432 tl = &di->id2.i_dealloc;
5433 i = le16_to_cpu(tl->tl_used) - 1;
5434 while (i >= 0) {
5435 /* Caller has given us at least enough credits to
5436 * update the truncate log dinode */
5437 status = ocfs2_journal_access(handle, tl_inode, tl_bh,
5438 OCFS2_JOURNAL_ACCESS_WRITE);
5439 if (status < 0) {
5440 mlog_errno(status);
5441 goto bail;
5442 }
5443
5444 tl->tl_used = cpu_to_le16(i);
5445
5446 status = ocfs2_journal_dirty(handle, tl_bh);
5447 if (status < 0) {
5448 mlog_errno(status);
5449 goto bail;
5450 }
5451
5452 /* TODO: Perhaps we can calculate the bulk of the
5453 * credits up front rather than extending like
5454 * this. */
5455 status = ocfs2_extend_trans(handle,
5456 OCFS2_TRUNCATE_LOG_FLUSH_ONE_REC);
5457 if (status < 0) {
5458 mlog_errno(status);
5459 goto bail;
5460 }
5461
5462 rec = tl->tl_recs[i];
5463 start_blk = ocfs2_clusters_to_blocks(data_alloc_inode->i_sb,
5464 le32_to_cpu(rec.t_start));
5465 num_clusters = le32_to_cpu(rec.t_clusters);
5466
5467 /* if start_blk is not set, we ignore the record as
5468 * invalid. */
5469 if (start_blk) {
5470 mlog(0, "free record %d, start = %u, clusters = %u\n",
5471 i, le32_to_cpu(rec.t_start), num_clusters);
5472
5473 status = ocfs2_free_clusters(handle, data_alloc_inode,
5474 data_alloc_bh, start_blk,
5475 num_clusters);
5476 if (status < 0) {
5477 mlog_errno(status);
5478 goto bail;
5479 }
5480 }
5481 i--;
5482 }
5483
5484bail:
5485 mlog_exit(status);
5486 return status;
5487}
5488
Jes Sorensen1b1dcc12006-01-09 15:59:24 -08005489/* Expects you to already be holding tl_inode->i_mutex */
Mark Fasheh063c4562007-07-03 13:34:11 -07005490int __ocfs2_flush_truncate_log(struct ocfs2_super *osb)
Mark Fashehccd979b2005-12-15 14:31:24 -08005491{
5492 int status;
5493 unsigned int num_to_flush;
Mark Fasheh1fabe142006-10-09 18:11:45 -07005494 handle_t *handle;
Mark Fashehccd979b2005-12-15 14:31:24 -08005495 struct inode *tl_inode = osb->osb_tl_inode;
5496 struct inode *data_alloc_inode = NULL;
5497 struct buffer_head *tl_bh = osb->osb_tl_bh;
5498 struct buffer_head *data_alloc_bh = NULL;
5499 struct ocfs2_dinode *di;
5500 struct ocfs2_truncate_log *tl;
5501
5502 mlog_entry_void();
5503
Jes Sorensen1b1dcc12006-01-09 15:59:24 -08005504 BUG_ON(mutex_trylock(&tl_inode->i_mutex));
Mark Fashehccd979b2005-12-15 14:31:24 -08005505
5506 di = (struct ocfs2_dinode *) tl_bh->b_data;
5507 tl = &di->id2.i_dealloc;
5508 if (!OCFS2_IS_VALID_DINODE(di)) {
5509 OCFS2_RO_ON_INVALID_DINODE(osb->sb, di);
5510 status = -EIO;
Mark Fashehe08dc8b2006-10-05 15:58:48 -07005511 goto out;
Mark Fashehccd979b2005-12-15 14:31:24 -08005512 }
5513
5514 num_to_flush = le16_to_cpu(tl->tl_used);
Mark Fashehb0697052006-03-03 10:24:33 -08005515 mlog(0, "Flush %u records from truncate log #%llu\n",
5516 num_to_flush, (unsigned long long)OCFS2_I(tl_inode)->ip_blkno);
Mark Fashehccd979b2005-12-15 14:31:24 -08005517 if (!num_to_flush) {
5518 status = 0;
Mark Fashehe08dc8b2006-10-05 15:58:48 -07005519 goto out;
Mark Fashehccd979b2005-12-15 14:31:24 -08005520 }
5521
5522 data_alloc_inode = ocfs2_get_system_file_inode(osb,
5523 GLOBAL_BITMAP_SYSTEM_INODE,
5524 OCFS2_INVALID_SLOT);
5525 if (!data_alloc_inode) {
5526 status = -EINVAL;
5527 mlog(ML_ERROR, "Could not get bitmap inode!\n");
Mark Fashehe08dc8b2006-10-05 15:58:48 -07005528 goto out;
Mark Fashehccd979b2005-12-15 14:31:24 -08005529 }
5530
Mark Fashehe08dc8b2006-10-05 15:58:48 -07005531 mutex_lock(&data_alloc_inode->i_mutex);
5532
Mark Fashehe63aecb62007-10-18 15:30:42 -07005533 status = ocfs2_inode_lock(data_alloc_inode, &data_alloc_bh, 1);
Mark Fashehccd979b2005-12-15 14:31:24 -08005534 if (status < 0) {
5535 mlog_errno(status);
Mark Fashehe08dc8b2006-10-05 15:58:48 -07005536 goto out_mutex;
Mark Fashehccd979b2005-12-15 14:31:24 -08005537 }
5538
Mark Fasheh65eff9c2006-10-09 17:26:22 -07005539 handle = ocfs2_start_trans(osb, OCFS2_TRUNCATE_LOG_UPDATE);
Mark Fashehccd979b2005-12-15 14:31:24 -08005540 if (IS_ERR(handle)) {
5541 status = PTR_ERR(handle);
Mark Fashehccd979b2005-12-15 14:31:24 -08005542 mlog_errno(status);
Mark Fashehe08dc8b2006-10-05 15:58:48 -07005543 goto out_unlock;
Mark Fashehccd979b2005-12-15 14:31:24 -08005544 }
5545
5546 status = ocfs2_replay_truncate_records(osb, handle, data_alloc_inode,
5547 data_alloc_bh);
Mark Fashehe08dc8b2006-10-05 15:58:48 -07005548 if (status < 0)
Mark Fashehccd979b2005-12-15 14:31:24 -08005549 mlog_errno(status);
Mark Fashehccd979b2005-12-15 14:31:24 -08005550
Mark Fasheh02dc1af2006-10-09 16:48:10 -07005551 ocfs2_commit_trans(osb, handle);
Mark Fashehccd979b2005-12-15 14:31:24 -08005552
Mark Fashehe08dc8b2006-10-05 15:58:48 -07005553out_unlock:
5554 brelse(data_alloc_bh);
Mark Fashehe63aecb62007-10-18 15:30:42 -07005555 ocfs2_inode_unlock(data_alloc_inode, 1);
Mark Fashehccd979b2005-12-15 14:31:24 -08005556
Mark Fashehe08dc8b2006-10-05 15:58:48 -07005557out_mutex:
5558 mutex_unlock(&data_alloc_inode->i_mutex);
5559 iput(data_alloc_inode);
Mark Fashehccd979b2005-12-15 14:31:24 -08005560
Mark Fashehe08dc8b2006-10-05 15:58:48 -07005561out:
Mark Fashehccd979b2005-12-15 14:31:24 -08005562 mlog_exit(status);
5563 return status;
5564}
5565
5566int ocfs2_flush_truncate_log(struct ocfs2_super *osb)
5567{
5568 int status;
5569 struct inode *tl_inode = osb->osb_tl_inode;
5570
Jes Sorensen1b1dcc12006-01-09 15:59:24 -08005571 mutex_lock(&tl_inode->i_mutex);
Mark Fashehccd979b2005-12-15 14:31:24 -08005572 status = __ocfs2_flush_truncate_log(osb);
Jes Sorensen1b1dcc12006-01-09 15:59:24 -08005573 mutex_unlock(&tl_inode->i_mutex);
Mark Fashehccd979b2005-12-15 14:31:24 -08005574
5575 return status;
5576}
5577
David Howellsc4028952006-11-22 14:57:56 +00005578static void ocfs2_truncate_log_worker(struct work_struct *work)
Mark Fashehccd979b2005-12-15 14:31:24 -08005579{
5580 int status;
David Howellsc4028952006-11-22 14:57:56 +00005581 struct ocfs2_super *osb =
5582 container_of(work, struct ocfs2_super,
5583 osb_truncate_log_wq.work);
Mark Fashehccd979b2005-12-15 14:31:24 -08005584
5585 mlog_entry_void();
5586
5587 status = ocfs2_flush_truncate_log(osb);
5588 if (status < 0)
5589 mlog_errno(status);
Tao Ma4d0ddb22008-03-05 16:11:46 +08005590 else
5591 ocfs2_init_inode_steal_slot(osb);
Mark Fashehccd979b2005-12-15 14:31:24 -08005592
5593 mlog_exit(status);
5594}
5595
5596#define OCFS2_TRUNCATE_LOG_FLUSH_INTERVAL (2 * HZ)
5597void ocfs2_schedule_truncate_log_flush(struct ocfs2_super *osb,
5598 int cancel)
5599{
5600 if (osb->osb_tl_inode) {
5601 /* We want to push off log flushes while truncates are
5602 * still running. */
5603 if (cancel)
5604 cancel_delayed_work(&osb->osb_truncate_log_wq);
5605
5606 queue_delayed_work(ocfs2_wq, &osb->osb_truncate_log_wq,
5607 OCFS2_TRUNCATE_LOG_FLUSH_INTERVAL);
5608 }
5609}
5610
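/*
 * Look up the truncate log system inode for 'slot_num' and read its
 * dinode block. Both are handed back to the caller on success.
 */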
5611static int ocfs2_get_truncate_log_info(struct ocfs2_super *osb,
5612 int slot_num,
5613 struct inode **tl_inode,
5614 struct buffer_head **tl_bh)
5615{
5616 int status;
5617 struct inode *inode = NULL;
5618 struct buffer_head *bh = NULL;
5619
5620 inode = ocfs2_get_system_file_inode(osb,
5621 TRUNCATE_LOG_SYSTEM_INODE,
5622 slot_num);
5623 if (!inode) {
5624 status = -EINVAL;
	5625		mlog(ML_ERROR, "Could not get truncate log inode!\n");
5626 goto bail;
5627 }
5628
5629 status = ocfs2_read_block(osb, OCFS2_I(inode)->ip_blkno, &bh,
5630 OCFS2_BH_CACHED, inode);
5631 if (status < 0) {
5632 iput(inode);
5633 mlog_errno(status);
5634 goto bail;
5635 }
5636
5637 *tl_inode = inode;
5638 *tl_bh = bh;
5639bail:
5640 mlog_exit(status);
5641 return status;
5642}
5643
5644/* called during the 1st stage of node recovery. we stamp a clean
5645 * truncate log and pass back a copy for processing later. if the
	5646 * truncate log does not require processing, *tl_copy is set to
5647 * NULL. */
5648int ocfs2_begin_truncate_log_recovery(struct ocfs2_super *osb,
5649 int slot_num,
5650 struct ocfs2_dinode **tl_copy)
5651{
5652 int status;
5653 struct inode *tl_inode = NULL;
5654 struct buffer_head *tl_bh = NULL;
5655 struct ocfs2_dinode *di;
5656 struct ocfs2_truncate_log *tl;
5657
5658 *tl_copy = NULL;
5659
5660 mlog(0, "recover truncate log from slot %d\n", slot_num);
5661
5662 status = ocfs2_get_truncate_log_info(osb, slot_num, &tl_inode, &tl_bh);
5663 if (status < 0) {
5664 mlog_errno(status);
5665 goto bail;
5666 }
5667
5668 di = (struct ocfs2_dinode *) tl_bh->b_data;
5669 tl = &di->id2.i_dealloc;
5670 if (!OCFS2_IS_VALID_DINODE(di)) {
5671 OCFS2_RO_ON_INVALID_DINODE(tl_inode->i_sb, di);
5672 status = -EIO;
5673 goto bail;
5674 }
5675
5676 if (le16_to_cpu(tl->tl_used)) {
5677 mlog(0, "We'll have %u logs to recover\n",
5678 le16_to_cpu(tl->tl_used));
5679
5680 *tl_copy = kmalloc(tl_bh->b_size, GFP_KERNEL);
5681 if (!(*tl_copy)) {
5682 status = -ENOMEM;
5683 mlog_errno(status);
5684 goto bail;
5685 }
5686
5687 /* Assuming the write-out below goes well, this copy
5688 * will be passed back to recovery for processing. */
5689 memcpy(*tl_copy, tl_bh->b_data, tl_bh->b_size);
5690
5691 /* All we need to do to clear the truncate log is set
5692 * tl_used. */
5693 tl->tl_used = 0;
5694
5695 status = ocfs2_write_block(osb, tl_bh, tl_inode);
5696 if (status < 0) {
5697 mlog_errno(status);
5698 goto bail;
5699 }
5700 }
5701
5702bail:
5703 if (tl_inode)
5704 iput(tl_inode);
5705 if (tl_bh)
5706 brelse(tl_bh);
5707
5708 if (status < 0 && (*tl_copy)) {
5709 kfree(*tl_copy);
5710 *tl_copy = NULL;
5711 }
5712
5713 mlog_exit(status);
5714 return status;
5715}
5716
5717int ocfs2_complete_truncate_log_recovery(struct ocfs2_super *osb,
5718 struct ocfs2_dinode *tl_copy)
5719{
5720 int status = 0;
5721 int i;
5722 unsigned int clusters, num_recs, start_cluster;
5723 u64 start_blk;
Mark Fasheh1fabe142006-10-09 18:11:45 -07005724 handle_t *handle;
Mark Fashehccd979b2005-12-15 14:31:24 -08005725 struct inode *tl_inode = osb->osb_tl_inode;
5726 struct ocfs2_truncate_log *tl;
5727
5728 mlog_entry_void();
5729
5730 if (OCFS2_I(tl_inode)->ip_blkno == le64_to_cpu(tl_copy->i_blkno)) {
5731 mlog(ML_ERROR, "Asked to recover my own truncate log!\n");
5732 return -EINVAL;
5733 }
5734
5735 tl = &tl_copy->id2.i_dealloc;
5736 num_recs = le16_to_cpu(tl->tl_used);
Mark Fashehb0697052006-03-03 10:24:33 -08005737 mlog(0, "cleanup %u records from %llu\n", num_recs,
Mark Fasheh1ca1a112007-04-27 16:01:25 -07005738 (unsigned long long)le64_to_cpu(tl_copy->i_blkno));
Mark Fashehccd979b2005-12-15 14:31:24 -08005739
Jes Sorensen1b1dcc12006-01-09 15:59:24 -08005740 mutex_lock(&tl_inode->i_mutex);
Mark Fashehccd979b2005-12-15 14:31:24 -08005741 for(i = 0; i < num_recs; i++) {
5742 if (ocfs2_truncate_log_needs_flush(osb)) {
5743 status = __ocfs2_flush_truncate_log(osb);
5744 if (status < 0) {
5745 mlog_errno(status);
5746 goto bail_up;
5747 }
5748 }
5749
Mark Fasheh65eff9c2006-10-09 17:26:22 -07005750 handle = ocfs2_start_trans(osb, OCFS2_TRUNCATE_LOG_UPDATE);
Mark Fashehccd979b2005-12-15 14:31:24 -08005751 if (IS_ERR(handle)) {
5752 status = PTR_ERR(handle);
5753 mlog_errno(status);
5754 goto bail_up;
5755 }
5756
5757 clusters = le32_to_cpu(tl->tl_recs[i].t_clusters);
5758 start_cluster = le32_to_cpu(tl->tl_recs[i].t_start);
5759 start_blk = ocfs2_clusters_to_blocks(osb->sb, start_cluster);
5760
5761 status = ocfs2_truncate_log_append(osb, handle,
5762 start_blk, clusters);
Mark Fasheh02dc1af2006-10-09 16:48:10 -07005763 ocfs2_commit_trans(osb, handle);
Mark Fashehccd979b2005-12-15 14:31:24 -08005764 if (status < 0) {
5765 mlog_errno(status);
5766 goto bail_up;
5767 }
5768 }
5769
5770bail_up:
Jes Sorensen1b1dcc12006-01-09 15:59:24 -08005771 mutex_unlock(&tl_inode->i_mutex);
Mark Fashehccd979b2005-12-15 14:31:24 -08005772
5773 mlog_exit(status);
5774 return status;
5775}
5776
5777void ocfs2_truncate_log_shutdown(struct ocfs2_super *osb)
5778{
5779 int status;
5780 struct inode *tl_inode = osb->osb_tl_inode;
5781
5782 mlog_entry_void();
5783
5784 if (tl_inode) {
5785 cancel_delayed_work(&osb->osb_truncate_log_wq);
5786 flush_workqueue(ocfs2_wq);
5787
5788 status = ocfs2_flush_truncate_log(osb);
5789 if (status < 0)
5790 mlog_errno(status);
5791
5792 brelse(osb->osb_tl_bh);
5793 iput(osb->osb_tl_inode);
5794 }
5795
5796 mlog_exit_void();
5797}
5798
5799int ocfs2_truncate_log_init(struct ocfs2_super *osb)
5800{
5801 int status;
5802 struct inode *tl_inode = NULL;
5803 struct buffer_head *tl_bh = NULL;
5804
5805 mlog_entry_void();
5806
5807 status = ocfs2_get_truncate_log_info(osb,
5808 osb->slot_num,
5809 &tl_inode,
5810 &tl_bh);
5811 if (status < 0)
5812 mlog_errno(status);
5813
5814 /* ocfs2_truncate_log_shutdown keys on the existence of
5815 * osb->osb_tl_inode so we don't set any of the osb variables
5816 * until we're sure all is well. */
David Howellsc4028952006-11-22 14:57:56 +00005817 INIT_DELAYED_WORK(&osb->osb_truncate_log_wq,
5818 ocfs2_truncate_log_worker);
Mark Fashehccd979b2005-12-15 14:31:24 -08005819 osb->osb_tl_bh = tl_bh;
5820 osb->osb_tl_inode = tl_inode;
5821
5822 mlog_exit(status);
5823 return status;
5824}
5825
Mark Fasheh2b604352007-06-22 15:45:27 -07005826/*
5827 * Delayed de-allocation of suballocator blocks.
5828 *
5829 * Some sets of block de-allocations might involve multiple suballocator inodes.
5830 *
5831 * The locking for this can get extremely complicated, especially when
5832 * the suballocator inodes to delete from aren't known until deep
5833 * within an unrelated codepath.
5834 *
5835 * ocfs2_extent_block structures are a good example of this - an inode
5836 * btree could have been grown by any number of nodes each allocating
5837 * out of their own suballoc inode.
5838 *
5839 * These structures allow the delay of block de-allocation until a
5840 * later time, when locking of multiple cluster inodes won't cause
5841 * deadlock.
5842 */
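/*
 * A sketch of typical usage (not a verbatim caller, and assuming the
 * usual ocfs2_init_dealloc_ctxt() helper from alloc.h): the context is
 * initialized up front, extent blocks freed during tree surgery are
 * cached into it, and the suballocator bits are only released once the
 * cluster locks taken for the operation have been dropped:
 *
 *	struct ocfs2_cached_dealloc_ctxt dealloc;
 *
 *	ocfs2_init_dealloc_ctxt(&dealloc);
 *	ret = ocfs2_remove_extent(inode, di_bh, cpos, len, handle,
 *				  meta_ac, &dealloc, OCFS2_DINODE_EXTENT,
 *				  NULL);
 *	...
 *	ocfs2_run_deallocs(osb, &dealloc);
 */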
5843
5844/*
5845 * Describes a single block free from a suballocator
5846 */
5847struct ocfs2_cached_block_free {
5848 struct ocfs2_cached_block_free *free_next;
5849 u64 free_blk;
5850 unsigned int free_bit;
5851};
5852
5853struct ocfs2_per_slot_free_list {
5854 struct ocfs2_per_slot_free_list *f_next_suballocator;
5855 int f_inode_type;
5856 int f_slot;
5857 struct ocfs2_cached_block_free *f_first;
5858};
5859
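/*
 * Free every cached block in 'head' back to the suballocator inode
 * identified by 'sysfile_type' and 'slot'. The inode is locked once for
 * the whole list and the transaction is extended after each bit freed.
 */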
5860static int ocfs2_free_cached_items(struct ocfs2_super *osb,
5861 int sysfile_type,
5862 int slot,
5863 struct ocfs2_cached_block_free *head)
5864{
5865 int ret;
5866 u64 bg_blkno;
5867 handle_t *handle;
5868 struct inode *inode;
5869 struct buffer_head *di_bh = NULL;
5870 struct ocfs2_cached_block_free *tmp;
5871
5872 inode = ocfs2_get_system_file_inode(osb, sysfile_type, slot);
5873 if (!inode) {
5874 ret = -EINVAL;
5875 mlog_errno(ret);
5876 goto out;
5877 }
5878
5879 mutex_lock(&inode->i_mutex);
5880
Mark Fashehe63aecb62007-10-18 15:30:42 -07005881 ret = ocfs2_inode_lock(inode, &di_bh, 1);
Mark Fasheh2b604352007-06-22 15:45:27 -07005882 if (ret) {
5883 mlog_errno(ret);
5884 goto out_mutex;
5885 }
5886
5887 handle = ocfs2_start_trans(osb, OCFS2_SUBALLOC_FREE);
5888 if (IS_ERR(handle)) {
5889 ret = PTR_ERR(handle);
5890 mlog_errno(ret);
5891 goto out_unlock;
5892 }
5893
5894 while (head) {
5895 bg_blkno = ocfs2_which_suballoc_group(head->free_blk,
5896 head->free_bit);
5897 mlog(0, "Free bit: (bit %u, blkno %llu)\n",
5898 head->free_bit, (unsigned long long)head->free_blk);
5899
5900 ret = ocfs2_free_suballoc_bits(handle, inode, di_bh,
5901 head->free_bit, bg_blkno, 1);
5902 if (ret) {
5903 mlog_errno(ret);
5904 goto out_journal;
5905 }
5906
5907 ret = ocfs2_extend_trans(handle, OCFS2_SUBALLOC_FREE);
5908 if (ret) {
5909 mlog_errno(ret);
5910 goto out_journal;
5911 }
5912
5913 tmp = head;
5914 head = head->free_next;
5915 kfree(tmp);
5916 }
5917
5918out_journal:
5919 ocfs2_commit_trans(osb, handle);
5920
5921out_unlock:
Mark Fashehe63aecb62007-10-18 15:30:42 -07005922 ocfs2_inode_unlock(inode, 1);
Mark Fasheh2b604352007-06-22 15:45:27 -07005923 brelse(di_bh);
5924out_mutex:
5925 mutex_unlock(&inode->i_mutex);
5926 iput(inode);
5927out:
5928 while(head) {
5929 /* Premature exit may have left some dangling items. */
5930 tmp = head;
5931 head = head->free_next;
5932 kfree(tmp);
5933 }
5934
5935 return ret;
5936}
5937
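/*
 * Process every per-slot free list attached to 'ctxt'. An error on one
 * list does not stop the remaining lists from being run; the first error
 * encountered is what gets returned.
 */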
5938int ocfs2_run_deallocs(struct ocfs2_super *osb,
5939 struct ocfs2_cached_dealloc_ctxt *ctxt)
5940{
5941 int ret = 0, ret2;
5942 struct ocfs2_per_slot_free_list *fl;
5943
5944 if (!ctxt)
5945 return 0;
5946
5947 while (ctxt->c_first_suballocator) {
5948 fl = ctxt->c_first_suballocator;
5949
5950 if (fl->f_first) {
5951 mlog(0, "Free items: (type %u, slot %d)\n",
5952 fl->f_inode_type, fl->f_slot);
5953 ret2 = ocfs2_free_cached_items(osb, fl->f_inode_type,
5954 fl->f_slot, fl->f_first);
5955 if (ret2)
5956 mlog_errno(ret2);
5957 if (!ret)
5958 ret = ret2;
5959 }
5960
5961 ctxt->c_first_suballocator = fl->f_next_suballocator;
5962 kfree(fl);
5963 }
5964
5965 return ret;
5966}
5967
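/*
 * Find the free list for the given suballocator type and slot, allocating
 * and linking a new one into 'ctxt' if none exists yet.
 */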
5968static struct ocfs2_per_slot_free_list *
5969ocfs2_find_per_slot_free_list(int type,
5970 int slot,
5971 struct ocfs2_cached_dealloc_ctxt *ctxt)
5972{
5973 struct ocfs2_per_slot_free_list *fl = ctxt->c_first_suballocator;
5974
5975 while (fl) {
5976 if (fl->f_inode_type == type && fl->f_slot == slot)
5977 return fl;
5978
5979 fl = fl->f_next_suballocator;
5980 }
5981
5982 fl = kmalloc(sizeof(*fl), GFP_NOFS);
5983 if (fl) {
5984 fl->f_inode_type = type;
5985 fl->f_slot = slot;
5986 fl->f_first = NULL;
5987 fl->f_next_suballocator = ctxt->c_first_suballocator;
5988
5989 ctxt->c_first_suballocator = fl;
5990 }
5991 return fl;
5992}
5993
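/*
 * Record a single suballocator bit (blkno/bit in the given type and slot)
 * for deferred freeing by ocfs2_run_deallocs().
 */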
5994static int ocfs2_cache_block_dealloc(struct ocfs2_cached_dealloc_ctxt *ctxt,
5995 int type, int slot, u64 blkno,
5996 unsigned int bit)
5997{
5998 int ret;
5999 struct ocfs2_per_slot_free_list *fl;
6000 struct ocfs2_cached_block_free *item;
6001
6002 fl = ocfs2_find_per_slot_free_list(type, slot, ctxt);
6003 if (fl == NULL) {
6004 ret = -ENOMEM;
6005 mlog_errno(ret);
6006 goto out;
6007 }
6008
6009 item = kmalloc(sizeof(*item), GFP_NOFS);
6010 if (item == NULL) {
6011 ret = -ENOMEM;
6012 mlog_errno(ret);
6013 goto out;
6014 }
6015
6016 mlog(0, "Insert: (type %d, slot %u, bit %u, blk %llu)\n",
6017 type, slot, bit, (unsigned long long)blkno);
6018
6019 item->free_blk = blkno;
6020 item->free_bit = bit;
6021 item->free_next = fl->f_first;
6022
6023 fl->f_first = item;
6024
6025 ret = 0;
6026out:
6027 return ret;
6028}
6029
Mark Fasheh59a5e412007-06-22 15:52:36 -07006030static int ocfs2_cache_extent_block_free(struct ocfs2_cached_dealloc_ctxt *ctxt,
6031 struct ocfs2_extent_block *eb)
6032{
6033 return ocfs2_cache_block_dealloc(ctxt, EXTENT_ALLOC_SYSTEM_INODE,
6034 le16_to_cpu(eb->h_suballoc_slot),
6035 le64_to_cpu(eb->h_blkno),
6036 le16_to_cpu(eb->h_suballoc_bit));
6037}
6038
Mark Fashehccd979b2005-12-15 14:31:24 -08006039/* This function will figure out whether the currently last extent
6040 * block will be deleted, and if it will, what the new last extent
	6041 * block will be so we can update its h_next_leaf_blk field, as well
	6042 * as the dinode's i_last_eb_blk */
Mark Fashehdcd05382007-01-16 11:32:23 -08006043static int ocfs2_find_new_last_ext_blk(struct inode *inode,
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006044 unsigned int clusters_to_del,
Mark Fashehdcd05382007-01-16 11:32:23 -08006045 struct ocfs2_path *path,
Mark Fashehccd979b2005-12-15 14:31:24 -08006046 struct buffer_head **new_last_eb)
6047{
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006048 int next_free, ret = 0;
Mark Fashehdcd05382007-01-16 11:32:23 -08006049 u32 cpos;
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006050 struct ocfs2_extent_rec *rec;
Mark Fashehccd979b2005-12-15 14:31:24 -08006051 struct ocfs2_extent_block *eb;
6052 struct ocfs2_extent_list *el;
6053 struct buffer_head *bh = NULL;
6054
6055 *new_last_eb = NULL;
6056
Mark Fashehccd979b2005-12-15 14:31:24 -08006057 /* we have no tree, so of course, no last_eb. */
Mark Fashehdcd05382007-01-16 11:32:23 -08006058 if (!path->p_tree_depth)
6059 goto out;
Mark Fashehccd979b2005-12-15 14:31:24 -08006060
6061 /* trunc to zero special case - this makes tree_depth = 0
6062 * regardless of what it is. */
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006063 if (OCFS2_I(inode)->ip_clusters == clusters_to_del)
Mark Fashehdcd05382007-01-16 11:32:23 -08006064 goto out;
Mark Fashehccd979b2005-12-15 14:31:24 -08006065
Mark Fashehdcd05382007-01-16 11:32:23 -08006066 el = path_leaf_el(path);
Mark Fashehccd979b2005-12-15 14:31:24 -08006067 BUG_ON(!el->l_next_free_rec);
6068
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006069 /*
6070 * Make sure that this extent list will actually be empty
6071 * after we clear away the data. We can shortcut out if
6072 * there's more than one non-empty extent in the
6073 * list. Otherwise, a check of the remaining extent is
6074 * necessary.
6075 */
6076 next_free = le16_to_cpu(el->l_next_free_rec);
6077 rec = NULL;
Mark Fashehdcd05382007-01-16 11:32:23 -08006078 if (ocfs2_is_empty_extent(&el->l_recs[0])) {
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006079 if (next_free > 2)
Mark Fashehdcd05382007-01-16 11:32:23 -08006080 goto out;
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006081
6082 /* We may have a valid extent in index 1, check it. */
6083 if (next_free == 2)
6084 rec = &el->l_recs[1];
6085
6086 /*
6087 * Fall through - no more nonempty extents, so we want
6088 * to delete this leaf.
6089 */
6090 } else {
6091 if (next_free > 1)
6092 goto out;
6093
6094 rec = &el->l_recs[0];
6095 }
6096
6097 if (rec) {
6098 /*
	6099		 * Check if we'll only be trimming off the end of this
6100 * cluster.
6101 */
Mark Fashehe48edee2007-03-07 16:46:57 -08006102 if (le16_to_cpu(rec->e_leaf_clusters) > clusters_to_del)
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006103 goto out;
6104 }
Mark Fashehccd979b2005-12-15 14:31:24 -08006105
Mark Fashehdcd05382007-01-16 11:32:23 -08006106 ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb, path, &cpos);
6107 if (ret) {
6108 mlog_errno(ret);
6109 goto out;
6110 }
Mark Fashehccd979b2005-12-15 14:31:24 -08006111
Mark Fashehdcd05382007-01-16 11:32:23 -08006112 ret = ocfs2_find_leaf(inode, path_root_el(path), cpos, &bh);
6113 if (ret) {
6114 mlog_errno(ret);
6115 goto out;
6116 }
Mark Fashehccd979b2005-12-15 14:31:24 -08006117
Mark Fashehdcd05382007-01-16 11:32:23 -08006118 eb = (struct ocfs2_extent_block *) bh->b_data;
6119 el = &eb->h_list;
6120 if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) {
6121 OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb);
6122 ret = -EROFS;
6123 goto out;
6124 }
Mark Fashehccd979b2005-12-15 14:31:24 -08006125
6126 *new_last_eb = bh;
6127 get_bh(*new_last_eb);
Mark Fashehdcd05382007-01-16 11:32:23 -08006128 mlog(0, "returning block %llu, (cpos: %u)\n",
6129 (unsigned long long)le64_to_cpu(eb->h_blkno), cpos);
6130out:
6131 brelse(bh);
Mark Fashehccd979b2005-12-15 14:31:24 -08006132
Mark Fashehdcd05382007-01-16 11:32:23 -08006133 return ret;
Mark Fashehccd979b2005-12-15 14:31:24 -08006134}
6135
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006136/*
6137 * Trim some clusters off the rightmost edge of a tree. Only called
6138 * during truncate.
6139 *
6140 * The caller needs to:
6141 * - start journaling of each path component.
6142 * - compute and fully set up any new last ext block
6143 */
6144static int ocfs2_trim_tree(struct inode *inode, struct ocfs2_path *path,
6145 handle_t *handle, struct ocfs2_truncate_context *tc,
6146 u32 clusters_to_del, u64 *delete_start)
6147{
6148 int ret, i, index = path->p_tree_depth;
6149 u32 new_edge = 0;
6150 u64 deleted_eb = 0;
6151 struct buffer_head *bh;
6152 struct ocfs2_extent_list *el;
6153 struct ocfs2_extent_rec *rec;
6154
6155 *delete_start = 0;
6156
6157 while (index >= 0) {
6158 bh = path->p_node[index].bh;
6159 el = path->p_node[index].el;
6160
6161 mlog(0, "traveling tree (index = %d, block = %llu)\n",
6162 index, (unsigned long long)bh->b_blocknr);
6163
6164 BUG_ON(le16_to_cpu(el->l_next_free_rec) == 0);
6165
6166 if (index !=
6167 (path->p_tree_depth - le16_to_cpu(el->l_tree_depth))) {
6168 ocfs2_error(inode->i_sb,
6169 "Inode %lu has invalid ext. block %llu",
6170 inode->i_ino,
6171 (unsigned long long)bh->b_blocknr);
6172 ret = -EROFS;
6173 goto out;
6174 }
6175
6176find_tail_record:
6177 i = le16_to_cpu(el->l_next_free_rec) - 1;
6178 rec = &el->l_recs[i];
6179
6180 mlog(0, "Extent list before: record %d: (%u, %u, %llu), "
6181 "next = %u\n", i, le32_to_cpu(rec->e_cpos),
Mark Fashehe48edee2007-03-07 16:46:57 -08006182 ocfs2_rec_clusters(el, rec),
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006183 (unsigned long long)le64_to_cpu(rec->e_blkno),
6184 le16_to_cpu(el->l_next_free_rec));
6185
Mark Fashehe48edee2007-03-07 16:46:57 -08006186 BUG_ON(ocfs2_rec_clusters(el, rec) < clusters_to_del);
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006187
6188 if (le16_to_cpu(el->l_tree_depth) == 0) {
	6189			 * If the leaf block contains nothing but a
	6190			 * single empty extent record, we can just
	6191			 * remove the block.
6192 * the block.
6193 */
6194 if (i == 0 && ocfs2_is_empty_extent(rec)) {
6195 memset(rec, 0,
6196 sizeof(struct ocfs2_extent_rec));
6197 el->l_next_free_rec = cpu_to_le16(0);
6198
6199 goto delete;
6200 }
6201
6202 /*
6203 * Remove any empty extents by shifting things
6204 * left. That should make life much easier on
6205 * the code below. This condition is rare
6206 * enough that we shouldn't see a performance
6207 * hit.
6208 */
6209 if (ocfs2_is_empty_extent(&el->l_recs[0])) {
6210 le16_add_cpu(&el->l_next_free_rec, -1);
6211
6212 for(i = 0;
6213 i < le16_to_cpu(el->l_next_free_rec); i++)
6214 el->l_recs[i] = el->l_recs[i + 1];
6215
6216 memset(&el->l_recs[i], 0,
6217 sizeof(struct ocfs2_extent_rec));
6218
6219 /*
6220 * We've modified our extent list. The
6221 * simplest way to handle this change
	6222				 * is to begin the search from the
6223 * start again.
6224 */
6225 goto find_tail_record;
6226 }
6227
Mark Fashehe48edee2007-03-07 16:46:57 -08006228 le16_add_cpu(&rec->e_leaf_clusters, -clusters_to_del);
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006229
6230 /*
6231 * We'll use "new_edge" on our way back up the
6232 * tree to know what our rightmost cpos is.
6233 */
Mark Fashehe48edee2007-03-07 16:46:57 -08006234 new_edge = le16_to_cpu(rec->e_leaf_clusters);
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006235 new_edge += le32_to_cpu(rec->e_cpos);
6236
6237 /*
6238 * The caller will use this to delete data blocks.
6239 */
6240 *delete_start = le64_to_cpu(rec->e_blkno)
6241 + ocfs2_clusters_to_blocks(inode->i_sb,
Mark Fashehe48edee2007-03-07 16:46:57 -08006242 le16_to_cpu(rec->e_leaf_clusters));
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006243
6244 /*
6245 * If it's now empty, remove this record.
6246 */
Mark Fashehe48edee2007-03-07 16:46:57 -08006247 if (le16_to_cpu(rec->e_leaf_clusters) == 0) {
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006248 memset(rec, 0,
6249 sizeof(struct ocfs2_extent_rec));
6250 le16_add_cpu(&el->l_next_free_rec, -1);
6251 }
6252 } else {
6253 if (le64_to_cpu(rec->e_blkno) == deleted_eb) {
6254 memset(rec, 0,
6255 sizeof(struct ocfs2_extent_rec));
6256 le16_add_cpu(&el->l_next_free_rec, -1);
6257
6258 goto delete;
6259 }
6260
6261 /* Can this actually happen? */
6262 if (le16_to_cpu(el->l_next_free_rec) == 0)
6263 goto delete;
6264
6265 /*
6266 * We never actually deleted any clusters
6267 * because our leaf was empty. There's no
6268 * reason to adjust the rightmost edge then.
6269 */
6270 if (new_edge == 0)
6271 goto delete;
6272
Mark Fashehe48edee2007-03-07 16:46:57 -08006273 rec->e_int_clusters = cpu_to_le32(new_edge);
6274 le32_add_cpu(&rec->e_int_clusters,
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006275 -le32_to_cpu(rec->e_cpos));
6276
6277 /*
6278 * A deleted child record should have been
6279 * caught above.
6280 */
Mark Fashehe48edee2007-03-07 16:46:57 -08006281 BUG_ON(le32_to_cpu(rec->e_int_clusters) == 0);
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006282 }
6283
6284delete:
6285 ret = ocfs2_journal_dirty(handle, bh);
6286 if (ret) {
6287 mlog_errno(ret);
6288 goto out;
6289 }
6290
6291 mlog(0, "extent list container %llu, after: record %d: "
6292 "(%u, %u, %llu), next = %u.\n",
6293 (unsigned long long)bh->b_blocknr, i,
Mark Fashehe48edee2007-03-07 16:46:57 -08006294 le32_to_cpu(rec->e_cpos), ocfs2_rec_clusters(el, rec),
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006295 (unsigned long long)le64_to_cpu(rec->e_blkno),
6296 le16_to_cpu(el->l_next_free_rec));
6297
6298 /*
6299 * We must be careful to only attempt delete of an
6300 * extent block (and not the root inode block).
6301 */
6302 if (index > 0 && le16_to_cpu(el->l_next_free_rec) == 0) {
6303 struct ocfs2_extent_block *eb =
6304 (struct ocfs2_extent_block *)bh->b_data;
6305
6306 /*
6307 * Save this for use when processing the
6308 * parent block.
6309 */
6310 deleted_eb = le64_to_cpu(eb->h_blkno);
6311
6312 mlog(0, "deleting this extent block.\n");
6313
6314 ocfs2_remove_from_cache(inode, bh);
6315
Mark Fashehe48edee2007-03-07 16:46:57 -08006316 BUG_ON(ocfs2_rec_clusters(el, &el->l_recs[0]));
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006317 BUG_ON(le32_to_cpu(el->l_recs[0].e_cpos));
6318 BUG_ON(le64_to_cpu(el->l_recs[0].e_blkno));
6319
Mark Fasheh59a5e412007-06-22 15:52:36 -07006320 ret = ocfs2_cache_extent_block_free(&tc->tc_dealloc, eb);
6321 /* An error here is not fatal. */
6322 if (ret < 0)
6323 mlog_errno(ret);
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006324 } else {
6325 deleted_eb = 0;
6326 }
6327
6328 index--;
6329 }
6330
6331 ret = 0;
6332out:
6333 return ret;
6334}
6335
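/*
 * Remove 'clusters_to_del' clusters from the rightmost edge of the
 * inode's tree: journal the path, trim the tree, update i_clusters and
 * the last extent block pointer, then log the freed clusters in the
 * truncate log for later deallocation.
 */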
Mark Fashehccd979b2005-12-15 14:31:24 -08006336static int ocfs2_do_truncate(struct ocfs2_super *osb,
6337 unsigned int clusters_to_del,
6338 struct inode *inode,
6339 struct buffer_head *fe_bh,
Mark Fasheh1fabe142006-10-09 18:11:45 -07006340 handle_t *handle,
Mark Fashehdcd05382007-01-16 11:32:23 -08006341 struct ocfs2_truncate_context *tc,
6342 struct ocfs2_path *path)
Mark Fashehccd979b2005-12-15 14:31:24 -08006343{
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006344 int status;
Mark Fashehccd979b2005-12-15 14:31:24 -08006345 struct ocfs2_dinode *fe;
Mark Fashehccd979b2005-12-15 14:31:24 -08006346 struct ocfs2_extent_block *last_eb = NULL;
6347 struct ocfs2_extent_list *el;
Mark Fashehccd979b2005-12-15 14:31:24 -08006348 struct buffer_head *last_eb_bh = NULL;
Mark Fashehccd979b2005-12-15 14:31:24 -08006349 u64 delete_blk = 0;
6350
6351 fe = (struct ocfs2_dinode *) fe_bh->b_data;
6352
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006353 status = ocfs2_find_new_last_ext_blk(inode, clusters_to_del,
Mark Fashehdcd05382007-01-16 11:32:23 -08006354 path, &last_eb_bh);
Mark Fashehccd979b2005-12-15 14:31:24 -08006355 if (status < 0) {
6356 mlog_errno(status);
6357 goto bail;
6358 }
Mark Fashehccd979b2005-12-15 14:31:24 -08006359
Mark Fashehdcd05382007-01-16 11:32:23 -08006360 /*
6361 * Each component will be touched, so we might as well journal
6362 * here to avoid having to handle errors later.
6363 */
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006364 status = ocfs2_journal_access_path(inode, handle, path);
6365 if (status < 0) {
6366 mlog_errno(status);
6367 goto bail;
Mark Fashehdcd05382007-01-16 11:32:23 -08006368 }
6369
6370 if (last_eb_bh) {
6371 status = ocfs2_journal_access(handle, inode, last_eb_bh,
6372 OCFS2_JOURNAL_ACCESS_WRITE);
6373 if (status < 0) {
6374 mlog_errno(status);
6375 goto bail;
6376 }
6377
6378 last_eb = (struct ocfs2_extent_block *) last_eb_bh->b_data;
6379 }
6380
6381 el = &(fe->id2.i_list);
6382
6383 /*
6384 * Lower levels depend on this never happening, but it's best
6385 * to check it up here before changing the tree.
6386 */
Mark Fashehe48edee2007-03-07 16:46:57 -08006387 if (el->l_tree_depth && el->l_recs[0].e_int_clusters == 0) {
Mark Fashehdcd05382007-01-16 11:32:23 -08006388 ocfs2_error(inode->i_sb,
6389 "Inode %lu has an empty extent record, depth %u\n",
6390 inode->i_ino, le16_to_cpu(el->l_tree_depth));
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006391 status = -EROFS;
Mark Fashehccd979b2005-12-15 14:31:24 -08006392 goto bail;
6393 }
Mark Fashehccd979b2005-12-15 14:31:24 -08006394
6395 spin_lock(&OCFS2_I(inode)->ip_lock);
6396 OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters) -
6397 clusters_to_del;
6398 spin_unlock(&OCFS2_I(inode)->ip_lock);
6399 le32_add_cpu(&fe->i_clusters, -clusters_to_del);
Mark Fashehe535e2e2007-08-31 10:23:41 -07006400 inode->i_blocks = ocfs2_inode_sector_count(inode);
Mark Fashehccd979b2005-12-15 14:31:24 -08006401
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006402 status = ocfs2_trim_tree(inode, path, handle, tc,
6403 clusters_to_del, &delete_blk);
6404 if (status) {
6405 mlog_errno(status);
6406 goto bail;
Mark Fashehccd979b2005-12-15 14:31:24 -08006407 }
6408
Mark Fashehdcd05382007-01-16 11:32:23 -08006409 if (le32_to_cpu(fe->i_clusters) == 0) {
Mark Fashehccd979b2005-12-15 14:31:24 -08006410 /* trunc to zero is a special case. */
6411 el->l_tree_depth = 0;
6412 fe->i_last_eb_blk = 0;
6413 } else if (last_eb)
6414 fe->i_last_eb_blk = last_eb->h_blkno;
6415
6416 status = ocfs2_journal_dirty(handle, fe_bh);
6417 if (status < 0) {
6418 mlog_errno(status);
6419 goto bail;
6420 }
6421
6422 if (last_eb) {
6423 /* If there will be a new last extent block, then by
6424 * definition, there cannot be any leaves to the right of
	6425		 * it. */
Mark Fashehccd979b2005-12-15 14:31:24 -08006426 last_eb->h_next_leaf_blk = 0;
6427 status = ocfs2_journal_dirty(handle, last_eb_bh);
6428 if (status < 0) {
6429 mlog_errno(status);
6430 goto bail;
6431 }
6432 }
6433
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006434 if (delete_blk) {
6435 status = ocfs2_truncate_log_append(osb, handle, delete_blk,
6436 clusters_to_del);
Mark Fashehccd979b2005-12-15 14:31:24 -08006437 if (status < 0) {
6438 mlog_errno(status);
6439 goto bail;
6440 }
Mark Fashehccd979b2005-12-15 14:31:24 -08006441 }
6442 status = 0;
6443bail:
Mark Fashehdcd05382007-01-16 11:32:23 -08006444
Mark Fashehccd979b2005-12-15 14:31:24 -08006445 mlog_exit(status);
6446 return status;
6447}
6448
Mark Fasheh60b11392007-02-16 11:46:50 -08006449static int ocfs2_writeback_zero_func(handle_t *handle, struct buffer_head *bh)
6450{
6451 set_buffer_uptodate(bh);
6452 mark_buffer_dirty(bh);
6453 return 0;
6454}
6455
6456static int ocfs2_ordered_zero_func(handle_t *handle, struct buffer_head *bh)
6457{
6458 set_buffer_uptodate(bh);
6459 mark_buffer_dirty(bh);
6460 return ocfs2_journal_dirty_data(handle, bh);
6461}
6462
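/*
 * Map the buffers backing 'page' for the byte range [from, to),
 * optionally zero that range, and dirty the buffers (through the journal
 * when the inode requires ordered data mode).
 */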
Mark Fasheh1d410a62007-09-07 14:20:45 -07006463static void ocfs2_map_and_dirty_page(struct inode *inode, handle_t *handle,
6464 unsigned int from, unsigned int to,
6465 struct page *page, int zero, u64 *phys)
6466{
6467 int ret, partial = 0;
6468
6469 ret = ocfs2_map_page_blocks(page, phys, inode, from, to, 0);
6470 if (ret)
6471 mlog_errno(ret);
6472
6473 if (zero)
Christoph Lametereebd2aa2008-02-04 22:28:29 -08006474 zero_user_segment(page, from, to);
Mark Fasheh1d410a62007-09-07 14:20:45 -07006475
6476 /*
6477 * Need to set the buffers we zero'd into uptodate
6478 * here if they aren't - ocfs2_map_page_blocks()
6479 * might've skipped some
6480 */
6481 if (ocfs2_should_order_data(inode)) {
6482 ret = walk_page_buffers(handle,
6483 page_buffers(page),
6484 from, to, &partial,
6485 ocfs2_ordered_zero_func);
6486 if (ret < 0)
6487 mlog_errno(ret);
6488 } else {
6489 ret = walk_page_buffers(handle, page_buffers(page),
6490 from, to, &partial,
6491 ocfs2_writeback_zero_func);
6492 if (ret < 0)
6493 mlog_errno(ret);
6494 }
6495
6496 if (!partial)
6497 SetPageUptodate(page);
6498
6499 flush_dcache_page(page);
6500}
6501
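/*
 * Zero the byte range [start, end) across the locked pages handed in by
 * ocfs2_grab_eof_pages(), dirtying each page as it goes, then unlock and
 * release the pages.
 */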
Mark Fasheh35edec12007-07-06 14:41:18 -07006502static void ocfs2_zero_cluster_pages(struct inode *inode, loff_t start,
6503 loff_t end, struct page **pages,
6504 int numpages, u64 phys, handle_t *handle)
Mark Fasheh60b11392007-02-16 11:46:50 -08006505{
Mark Fasheh1d410a62007-09-07 14:20:45 -07006506 int i;
Mark Fasheh60b11392007-02-16 11:46:50 -08006507 struct page *page;
6508 unsigned int from, to = PAGE_CACHE_SIZE;
6509 struct super_block *sb = inode->i_sb;
6510
6511 BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(sb)));
6512
6513 if (numpages == 0)
6514 goto out;
6515
Mark Fasheh35edec12007-07-06 14:41:18 -07006516 to = PAGE_CACHE_SIZE;
Mark Fasheh60b11392007-02-16 11:46:50 -08006517 for(i = 0; i < numpages; i++) {
6518 page = pages[i];
6519
Mark Fasheh35edec12007-07-06 14:41:18 -07006520 from = start & (PAGE_CACHE_SIZE - 1);
6521 if ((end >> PAGE_CACHE_SHIFT) == page->index)
6522 to = end & (PAGE_CACHE_SIZE - 1);
6523
Mark Fasheh60b11392007-02-16 11:46:50 -08006524 BUG_ON(from > PAGE_CACHE_SIZE);
6525 BUG_ON(to > PAGE_CACHE_SIZE);
6526
Mark Fasheh1d410a62007-09-07 14:20:45 -07006527 ocfs2_map_and_dirty_page(inode, handle, from, to, page, 1,
6528 &phys);
Mark Fasheh60b11392007-02-16 11:46:50 -08006529
Mark Fasheh35edec12007-07-06 14:41:18 -07006530 start = (page->index + 1) << PAGE_CACHE_SHIFT;
Mark Fasheh60b11392007-02-16 11:46:50 -08006531 }
6532out:
Mark Fasheh1d410a62007-09-07 14:20:45 -07006533 if (pages)
6534 ocfs2_unlock_and_free_pages(pages, numpages);
Mark Fasheh60b11392007-02-16 11:46:50 -08006535}
6536
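/*
 * Grab and lock the page cache pages covering [start, end). Both offsets
 * must fall within the same cluster; the number of pages actually
 * grabbed is returned in *num.
 */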
Mark Fasheh35edec12007-07-06 14:41:18 -07006537static int ocfs2_grab_eof_pages(struct inode *inode, loff_t start, loff_t end,
Mark Fasheh1d410a62007-09-07 14:20:45 -07006538 struct page **pages, int *num)
Mark Fasheh60b11392007-02-16 11:46:50 -08006539{
Mark Fasheh1d410a62007-09-07 14:20:45 -07006540 int numpages, ret = 0;
Mark Fasheh60b11392007-02-16 11:46:50 -08006541 struct super_block *sb = inode->i_sb;
6542 struct address_space *mapping = inode->i_mapping;
6543 unsigned long index;
Mark Fasheh35edec12007-07-06 14:41:18 -07006544 loff_t last_page_bytes;
Mark Fasheh60b11392007-02-16 11:46:50 -08006545
Mark Fasheh35edec12007-07-06 14:41:18 -07006546 BUG_ON(start > end);
Mark Fasheh60b11392007-02-16 11:46:50 -08006547
Mark Fasheh35edec12007-07-06 14:41:18 -07006548 BUG_ON(start >> OCFS2_SB(sb)->s_clustersize_bits !=
6549 (end - 1) >> OCFS2_SB(sb)->s_clustersize_bits);
6550
Mark Fasheh1d410a62007-09-07 14:20:45 -07006551 numpages = 0;
Mark Fasheh35edec12007-07-06 14:41:18 -07006552 last_page_bytes = PAGE_ALIGN(end);
6553 index = start >> PAGE_CACHE_SHIFT;
Mark Fasheh60b11392007-02-16 11:46:50 -08006554 do {
6555 pages[numpages] = grab_cache_page(mapping, index);
6556 if (!pages[numpages]) {
6557 ret = -ENOMEM;
6558 mlog_errno(ret);
6559 goto out;
6560 }
6561
6562 numpages++;
6563 index++;
Mark Fasheh35edec12007-07-06 14:41:18 -07006564 } while (index < (last_page_bytes >> PAGE_CACHE_SHIFT));
Mark Fasheh60b11392007-02-16 11:46:50 -08006565
6566out:
6567 if (ret != 0) {
Mark Fasheh1d410a62007-09-07 14:20:45 -07006568 if (pages)
6569 ocfs2_unlock_and_free_pages(pages, numpages);
Mark Fasheh60b11392007-02-16 11:46:50 -08006570 numpages = 0;
6571 }
6572
6573 *num = numpages;
6574
6575 return ret;
6576}
6577
6578/*
6579 * Zero the area past i_size but still within an allocated
6580 * cluster. This avoids exposing nonzero data on subsequent file
6581 * extends.
6582 *
6583 * We need to call this before i_size is updated on the inode because
6584 * otherwise block_write_full_page() will skip writeout of pages past
	6585 * i_size. The zeroing range is passed in explicitly for this reason.
6586 */
Mark Fasheh35edec12007-07-06 14:41:18 -07006587int ocfs2_zero_range_for_truncate(struct inode *inode, handle_t *handle,
6588 u64 range_start, u64 range_end)
Mark Fasheh60b11392007-02-16 11:46:50 -08006589{
Mark Fasheh1d410a62007-09-07 14:20:45 -07006590 int ret = 0, numpages;
Mark Fasheh60b11392007-02-16 11:46:50 -08006591 struct page **pages = NULL;
6592 u64 phys;
Mark Fasheh1d410a62007-09-07 14:20:45 -07006593 unsigned int ext_flags;
6594 struct super_block *sb = inode->i_sb;
Mark Fasheh60b11392007-02-16 11:46:50 -08006595
6596 /*
6597 * File systems which don't support sparse files zero on every
6598 * extend.
6599 */
Mark Fasheh1d410a62007-09-07 14:20:45 -07006600 if (!ocfs2_sparse_alloc(OCFS2_SB(sb)))
Mark Fasheh60b11392007-02-16 11:46:50 -08006601 return 0;
6602
Mark Fasheh1d410a62007-09-07 14:20:45 -07006603 pages = kcalloc(ocfs2_pages_per_cluster(sb),
Mark Fasheh60b11392007-02-16 11:46:50 -08006604 sizeof(struct page *), GFP_NOFS);
6605 if (pages == NULL) {
6606 ret = -ENOMEM;
6607 mlog_errno(ret);
6608 goto out;
6609 }
6610
Mark Fasheh1d410a62007-09-07 14:20:45 -07006611 if (range_start == range_end)
6612 goto out;
6613
6614 ret = ocfs2_extent_map_get_blocks(inode,
6615 range_start >> sb->s_blocksize_bits,
6616 &phys, NULL, &ext_flags);
Mark Fasheh60b11392007-02-16 11:46:50 -08006617 if (ret) {
6618 mlog_errno(ret);
6619 goto out;
6620 }
6621
Mark Fasheh1d410a62007-09-07 14:20:45 -07006622 /*
6623 * Tail is a hole, or is marked unwritten. In either case, we
6624 * can count on read and write to return/push zero's.
6625 */
6626 if (phys == 0 || ext_flags & OCFS2_EXT_UNWRITTEN)
Mark Fasheh60b11392007-02-16 11:46:50 -08006627 goto out;
6628
Mark Fasheh1d410a62007-09-07 14:20:45 -07006629 ret = ocfs2_grab_eof_pages(inode, range_start, range_end, pages,
6630 &numpages);
6631 if (ret) {
6632 mlog_errno(ret);
6633 goto out;
6634 }
6635
Mark Fasheh35edec12007-07-06 14:41:18 -07006636 ocfs2_zero_cluster_pages(inode, range_start, range_end, pages,
6637 numpages, phys, handle);
Mark Fasheh60b11392007-02-16 11:46:50 -08006638
6639 /*
6640 * Initiate writeout of the pages we zero'd here. We don't
6641 * wait on them - the truncate_inode_pages() call later will
6642 * do that for us.
6643 */
Mark Fasheh35edec12007-07-06 14:41:18 -07006644 ret = do_sync_mapping_range(inode->i_mapping, range_start,
6645 range_end - 1, SYNC_FILE_RANGE_WRITE);
Mark Fasheh60b11392007-02-16 11:46:50 -08006646 if (ret)
6647 mlog_errno(ret);
6648
6649out:
6650 if (pages)
6651 kfree(pages);
6652
6653 return ret;
6654}
6655
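/*
 * Zero the id2 union of the dinode. When the inode carries inline
 * xattrs, the xattr area at the end of the block is left untouched.
 */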
Tiger Yangfdd77702008-08-18 17:08:55 +08006656static void ocfs2_zero_dinode_id2_with_xattr(struct inode *inode,
6657 struct ocfs2_dinode *di)
Mark Fasheh1afc32b2007-09-07 14:46:51 -07006658{
6659 unsigned int blocksize = 1 << inode->i_sb->s_blocksize_bits;
Tiger Yangfdd77702008-08-18 17:08:55 +08006660 unsigned int xattrsize = le16_to_cpu(di->i_xattr_inline_size);
Mark Fasheh1afc32b2007-09-07 14:46:51 -07006661
Tiger Yangfdd77702008-08-18 17:08:55 +08006662 if (le16_to_cpu(di->i_dyn_features) & OCFS2_INLINE_XATTR_FL)
6663 memset(&di->id2, 0, blocksize -
6664 offsetof(struct ocfs2_dinode, id2) -
6665 xattrsize);
6666 else
6667 memset(&di->id2, 0, blocksize -
6668 offsetof(struct ocfs2_dinode, id2));
Mark Fasheh1afc32b2007-09-07 14:46:51 -07006669}
6670
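/*
 * Note on the arithmetic in ocfs2_zero_dinode_id2_with_xattr() above:
 * the id2 union runs from offsetof(struct ocfs2_dinode, id2) to the end
 * of the inode block, but when OCFS2_INLINE_XATTR_FL is set the last
 * i_xattr_inline_size bytes of that block hold inline extended
 * attributes, so the memset has to stop short of them.
 */
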
Mark Fasheh5b6a3a22007-09-13 16:33:54 -07006671void ocfs2_dinode_new_extent_list(struct inode *inode,
6672 struct ocfs2_dinode *di)
6673{
Tiger Yangfdd77702008-08-18 17:08:55 +08006674 ocfs2_zero_dinode_id2_with_xattr(inode, di);
Mark Fasheh5b6a3a22007-09-13 16:33:54 -07006675 di->id2.i_list.l_tree_depth = 0;
6676 di->id2.i_list.l_next_free_rec = 0;
Tiger Yangfdd77702008-08-18 17:08:55 +08006677 di->id2.i_list.l_count = cpu_to_le16(
6678 ocfs2_extent_recs_per_inode_with_xattr(inode->i_sb, di));
Mark Fasheh5b6a3a22007-09-13 16:33:54 -07006679}
6680
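/*
 * ocfs2_dinode_new_extent_list() above leaves the inode with an empty,
 * depth-zero extent list whose l_count only counts the space remaining
 * after any inline extended attributes; the caller in this file
 * journals and dirties the dinode buffer around it.
 */
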
Mark Fasheh1afc32b2007-09-07 14:46:51 -07006681void ocfs2_set_inode_data_inline(struct inode *inode, struct ocfs2_dinode *di)
6682{
6683 struct ocfs2_inode_info *oi = OCFS2_I(inode);
6684 struct ocfs2_inline_data *idata = &di->id2.i_data;
6685
6686 spin_lock(&oi->ip_lock);
6687 oi->ip_dyn_features |= OCFS2_INLINE_DATA_FL;
6688 di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
6689 spin_unlock(&oi->ip_lock);
6690
6691 /*
6692 * We clear the entire i_data structure here so that all
6693 * fields can be properly initialized.
6694 */
Tiger Yangfdd77702008-08-18 17:08:55 +08006695 ocfs2_zero_dinode_id2_with_xattr(inode, di);
Mark Fasheh1afc32b2007-09-07 14:46:51 -07006696
Tiger Yangfdd77702008-08-18 17:08:55 +08006697 idata->id_count = cpu_to_le16(
6698 ocfs2_max_inline_data_with_xattr(inode->i_sb, di));
Mark Fasheh1afc32b2007-09-07 14:46:51 -07006699}
6700
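/*
 * ocfs2_set_inode_data_inline() above sets OCFS2_INLINE_DATA_FL in both
 * the in-memory inode (under ip_lock) and the on-disk dinode, then
 * clears id2 so that id_count advertises only the bytes genuinely
 * available for inline data once inline xattr space is accounted for.
 */
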
6701int ocfs2_convert_inline_data_to_extents(struct inode *inode,
6702 struct buffer_head *di_bh)
6703{
6704 int ret, i, has_data, num_pages = 0;
6705 handle_t *handle;
6706 u64 uninitialized_var(block);
6707 struct ocfs2_inode_info *oi = OCFS2_I(inode);
6708 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
6709 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
Mark Fasheh1afc32b2007-09-07 14:46:51 -07006710 struct ocfs2_alloc_context *data_ac = NULL;
6711 struct page **pages = NULL;
6712 loff_t end = osb->s_clustersize;
6713
6714 has_data = i_size_read(inode) ? 1 : 0;
6715
6716 if (has_data) {
6717 pages = kcalloc(ocfs2_pages_per_cluster(osb->sb),
6718 sizeof(struct page *), GFP_NOFS);
6719 if (pages == NULL) {
6720 ret = -ENOMEM;
6721 mlog_errno(ret);
6722 goto out;
6723 }
6724
6725 ret = ocfs2_reserve_clusters(osb, 1, &data_ac);
6726 if (ret) {
6727 mlog_errno(ret);
6728 goto out;
6729 }
6730 }
6731
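	/*
	 * Everything below - claiming the cluster, writing the former
	 * inline bytes into the page cache, clearing the inline flag and
	 * inserting the new extent - runs under this one transaction,
	 * sized by OCFS2_INLINE_TO_EXTENTS_CREDITS.
	 */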
6732 handle = ocfs2_start_trans(osb, OCFS2_INLINE_TO_EXTENTS_CREDITS);
6733 if (IS_ERR(handle)) {
6734 ret = PTR_ERR(handle);
6735 mlog_errno(ret);
6736 goto out_unlock;
6737 }
6738
6739 ret = ocfs2_journal_access(handle, inode, di_bh,
6740 OCFS2_JOURNAL_ACCESS_WRITE);
6741 if (ret) {
6742 mlog_errno(ret);
6743 goto out_commit;
6744 }
6745
6746 if (has_data) {
6747 u32 bit_off, num;
6748 unsigned int page_end;
6749 u64 phys;
6750
6751 ret = ocfs2_claim_clusters(osb, handle, data_ac, 1, &bit_off,
6752 &num);
6753 if (ret) {
6754 mlog_errno(ret);
6755 goto out_commit;
6756 }
6757
6758 /*
6759 * Save two copies, one for insert, and one that can
6760 * be changed by ocfs2_map_and_dirty_page() below.
6761 */
6762 block = phys = ocfs2_clusters_to_blocks(inode->i_sb, bit_off);
6763
6764 /*
6765	 * Non-sparse file systems zero on extend, so no need
6766 * to do that now.
6767 */
6768 if (!ocfs2_sparse_alloc(osb) &&
6769 PAGE_CACHE_SIZE < osb->s_clustersize)
6770 end = PAGE_CACHE_SIZE;
6771
6772 ret = ocfs2_grab_eof_pages(inode, 0, end, pages, &num_pages);
6773 if (ret) {
6774 mlog_errno(ret);
6775 goto out_commit;
6776 }
6777
6778 /*
6779 * This should populate the 1st page for us and mark
6780 * it up to date.
6781 */
6782 ret = ocfs2_read_inline_data(inode, pages[0], di_bh);
6783 if (ret) {
6784 mlog_errno(ret);
6785 goto out_commit;
6786 }
6787
6788 page_end = PAGE_CACHE_SIZE;
6789 if (PAGE_CACHE_SIZE > osb->s_clustersize)
6790 page_end = osb->s_clustersize;
6791
6792 for (i = 0; i < num_pages; i++)
6793 ocfs2_map_and_dirty_page(inode, handle, 0, page_end,
6794 pages[i], i > 0, &phys);
6795 }
6796
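	/*
	 * The data now lives in dirty page cache pages backed by the
	 * newly claimed cluster, so the inode can stop advertising
	 * inline data: drop the flag and rebuild id2 as an empty
	 * extent list.
	 */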
6797 spin_lock(&oi->ip_lock);
6798 oi->ip_dyn_features &= ~OCFS2_INLINE_DATA_FL;
6799 di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
6800 spin_unlock(&oi->ip_lock);
6801
Mark Fasheh5b6a3a22007-09-13 16:33:54 -07006802 ocfs2_dinode_new_extent_list(inode, di);
Mark Fasheh1afc32b2007-09-07 14:46:51 -07006803
6804 ocfs2_journal_dirty(handle, di_bh);
6805
6806 if (has_data) {
6807 /*
6808 * An error at this point should be extremely rare. If
6809 * this proves to be false, we could always re-build
6810 * the in-inode data from our pages.
6811 */
Tao Maf56654c2008-08-18 17:38:48 +08006812 ret = ocfs2_dinode_insert_extent(osb, handle, inode, di_bh,
6813 0, block, 1, 0, NULL);
Mark Fasheh1afc32b2007-09-07 14:46:51 -07006814 if (ret) {
6815 mlog_errno(ret);
6816 goto out_commit;
6817 }
6818
6819 inode->i_blocks = ocfs2_inode_sector_count(inode);
6820 }
6821
6822out_commit:
6823 ocfs2_commit_trans(osb, handle);
6824
6825out_unlock:
6826 if (data_ac)
6827 ocfs2_free_alloc_context(data_ac);
6828
6829out:
6830 if (pages) {
6831 ocfs2_unlock_and_free_pages(pages, num_pages);
6832 kfree(pages);
6833 }
6834
6835 return ret;
6836}
6837
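/*
 * Rough shape of ocfs2_convert_inline_data_to_extents() above, for
 * reference: reserve one cluster (only if the file has data), start a
 * transaction, claim the cluster, copy the inline bytes into the page
 * cache via ocfs2_read_inline_data() and dirty the pages, clear
 * OCFS2_INLINE_DATA_FL, rebuild the extent list and finally insert the
 * new single-cluster extent at cpos 0.
 */
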
Mark Fashehccd979b2005-12-15 14:31:24 -08006838/*
6839 * It is expected that by the time you call this function,
6840 * inode->i_size and fe->i_size have been adjusted.
6841 *
6842 * WARNING: This will kfree the truncate context
6843 */
6844int ocfs2_commit_truncate(struct ocfs2_super *osb,
6845 struct inode *inode,
6846 struct buffer_head *fe_bh,
6847 struct ocfs2_truncate_context *tc)
6848{
6849 int status, i, credits, tl_sem = 0;
Mark Fashehdcd05382007-01-16 11:32:23 -08006850 u32 clusters_to_del, new_highest_cpos, range;
Mark Fashehccd979b2005-12-15 14:31:24 -08006851 struct ocfs2_extent_list *el;
Mark Fasheh1fabe142006-10-09 18:11:45 -07006852 handle_t *handle = NULL;
Mark Fashehccd979b2005-12-15 14:31:24 -08006853 struct inode *tl_inode = osb->osb_tl_inode;
Mark Fashehdcd05382007-01-16 11:32:23 -08006854 struct ocfs2_path *path = NULL;
Tao Mae7d4cb62008-08-18 17:38:44 +08006855 struct ocfs2_dinode *di = (struct ocfs2_dinode *)fe_bh->b_data;
Mark Fashehccd979b2005-12-15 14:31:24 -08006856
6857 mlog_entry_void();
6858
Mark Fashehdcd05382007-01-16 11:32:23 -08006859 new_highest_cpos = ocfs2_clusters_for_bytes(osb->sb,
Mark Fashehccd979b2005-12-15 14:31:24 -08006860 i_size_read(inode));
6861
Tao Mae7d4cb62008-08-18 17:38:44 +08006862 path = ocfs2_new_path(fe_bh, &di->id2.i_list);
Mark Fashehdcd05382007-01-16 11:32:23 -08006863 if (!path) {
6864 status = -ENOMEM;
6865 mlog_errno(status);
6866 goto bail;
6867 }
Mark Fasheh83418972007-04-23 18:53:12 -07006868
6869 ocfs2_extent_map_trunc(inode, new_highest_cpos);
6870
Mark Fashehccd979b2005-12-15 14:31:24 -08006871start:
Mark Fashehdcd05382007-01-16 11:32:23 -08006872 /*
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006873 * Check that we still have allocation to delete.
6874 */
6875 if (OCFS2_I(inode)->ip_clusters == 0) {
6876 status = 0;
6877 goto bail;
6878 }
6879
6880 /*
Mark Fashehdcd05382007-01-16 11:32:23 -08006881 * Truncate always works against the rightmost tree branch.
6882 */
6883 status = ocfs2_find_path(inode, path, UINT_MAX);
6884 if (status) {
6885 mlog_errno(status);
6886 goto bail;
Mark Fashehccd979b2005-12-15 14:31:24 -08006887 }
6888
Mark Fashehdcd05382007-01-16 11:32:23 -08006889 mlog(0, "inode->ip_clusters = %u, tree_depth = %u\n",
6890 OCFS2_I(inode)->ip_clusters, path->p_tree_depth);
6891
6892 /*
6893	 * By now, el will point to the extent list on the bottommost
6894 * portion of this tree. Only the tail record is considered in
6895 * each pass.
6896 *
6897 * We handle the following cases, in order:
6898 * - empty extent: delete the remaining branch
6899 * - remove the entire record
6900 * - remove a partial record
6901 * - no record needs to be removed (truncate has completed)
6902 */
6903 el = path_leaf_el(path);
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006904 if (le16_to_cpu(el->l_next_free_rec) == 0) {
6905 ocfs2_error(inode->i_sb,
6906 "Inode %llu has empty extent block at %llu\n",
6907 (unsigned long long)OCFS2_I(inode)->ip_blkno,
6908 (unsigned long long)path_leaf_bh(path)->b_blocknr);
6909 status = -EROFS;
6910 goto bail;
6911 }
6912
Mark Fashehccd979b2005-12-15 14:31:24 -08006913 i = le16_to_cpu(el->l_next_free_rec) - 1;
Mark Fashehdcd05382007-01-16 11:32:23 -08006914 range = le32_to_cpu(el->l_recs[i].e_cpos) +
Mark Fashehe48edee2007-03-07 16:46:57 -08006915 ocfs2_rec_clusters(el, &el->l_recs[i]);
Mark Fashehdcd05382007-01-16 11:32:23 -08006916 if (i == 0 && ocfs2_is_empty_extent(&el->l_recs[i])) {
6917 clusters_to_del = 0;
6918 } else if (le32_to_cpu(el->l_recs[i].e_cpos) >= new_highest_cpos) {
Mark Fashehe48edee2007-03-07 16:46:57 -08006919 clusters_to_del = ocfs2_rec_clusters(el, &el->l_recs[i]);
Mark Fashehdcd05382007-01-16 11:32:23 -08006920 } else if (range > new_highest_cpos) {
Mark Fashehe48edee2007-03-07 16:46:57 -08006921 clusters_to_del = (ocfs2_rec_clusters(el, &el->l_recs[i]) +
Mark Fashehccd979b2005-12-15 14:31:24 -08006922 le32_to_cpu(el->l_recs[i].e_cpos)) -
Mark Fashehdcd05382007-01-16 11:32:23 -08006923 new_highest_cpos;
6924 } else {
6925 status = 0;
6926 goto bail;
6927 }
Mark Fashehccd979b2005-12-15 14:31:24 -08006928
Mark Fashehdcd05382007-01-16 11:32:23 -08006929 mlog(0, "clusters_to_del = %u in this pass, tail blk=%llu\n",
6930 clusters_to_del, (unsigned long long)path_leaf_bh(path)->b_blocknr);
6931
Jes Sorensen1b1dcc12006-01-09 15:59:24 -08006932 mutex_lock(&tl_inode->i_mutex);
Mark Fashehccd979b2005-12-15 14:31:24 -08006933 tl_sem = 1;
6934 /* ocfs2_truncate_log_needs_flush guarantees us at least one
6935 * record is free for use. If there isn't any, we flush to get
6936 * an empty truncate log. */
6937 if (ocfs2_truncate_log_needs_flush(osb)) {
6938 status = __ocfs2_flush_truncate_log(osb);
6939 if (status < 0) {
6940 mlog_errno(status);
6941 goto bail;
6942 }
6943 }
6944
6945 credits = ocfs2_calc_tree_trunc_credits(osb->sb, clusters_to_del,
Mark Fashehdcd05382007-01-16 11:32:23 -08006946 (struct ocfs2_dinode *)fe_bh->b_data,
6947 el);
Mark Fasheh65eff9c2006-10-09 17:26:22 -07006948 handle = ocfs2_start_trans(osb, credits);
Mark Fashehccd979b2005-12-15 14:31:24 -08006949 if (IS_ERR(handle)) {
6950 status = PTR_ERR(handle);
6951 handle = NULL;
6952 mlog_errno(status);
6953 goto bail;
6954 }
6955
Mark Fashehdcd05382007-01-16 11:32:23 -08006956 status = ocfs2_do_truncate(osb, clusters_to_del, inode, fe_bh, handle,
6957 tc, path);
Mark Fashehccd979b2005-12-15 14:31:24 -08006958 if (status < 0) {
6959 mlog_errno(status);
6960 goto bail;
6961 }
6962
Jes Sorensen1b1dcc12006-01-09 15:59:24 -08006963 mutex_unlock(&tl_inode->i_mutex);
Mark Fashehccd979b2005-12-15 14:31:24 -08006964 tl_sem = 0;
6965
Mark Fasheh02dc1af2006-10-09 16:48:10 -07006966 ocfs2_commit_trans(osb, handle);
Mark Fashehccd979b2005-12-15 14:31:24 -08006967 handle = NULL;
6968
Mark Fashehdcd05382007-01-16 11:32:23 -08006969 ocfs2_reinit_path(path, 1);
6970
6971 /*
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006972 * The check above will catch the case where we've truncated
6973 * away all allocation.
Mark Fashehdcd05382007-01-16 11:32:23 -08006974 */
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006975 goto start;
6976
Mark Fashehccd979b2005-12-15 14:31:24 -08006977bail:
Mark Fashehccd979b2005-12-15 14:31:24 -08006978
6979 ocfs2_schedule_truncate_log_flush(osb, 1);
6980
6981 if (tl_sem)
Jes Sorensen1b1dcc12006-01-09 15:59:24 -08006982 mutex_unlock(&tl_inode->i_mutex);
Mark Fashehccd979b2005-12-15 14:31:24 -08006983
6984 if (handle)
Mark Fasheh02dc1af2006-10-09 16:48:10 -07006985 ocfs2_commit_trans(osb, handle);
Mark Fashehccd979b2005-12-15 14:31:24 -08006986
Mark Fasheh59a5e412007-06-22 15:52:36 -07006987 ocfs2_run_deallocs(osb, &tc->tc_dealloc);
6988
Mark Fashehdcd05382007-01-16 11:32:23 -08006989 ocfs2_free_path(path);
Mark Fashehccd979b2005-12-15 14:31:24 -08006990
6991 /* This will drop the ext_alloc cluster lock for us */
6992 ocfs2_free_truncate_context(tc);
6993
6994 mlog_exit(status);
6995 return status;
6996}
6997
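/*
 * ocfs2_commit_truncate() above works in passes: each trip through the
 * "start" loop finds the rightmost leaf, trims at most the last record
 * by clusters_to_del inside its own transaction, and repeats until no
 * record extends past new_highest_cpos (or all allocation is gone).
 * The truncate log is flushed whenever it has no free records, and any
 * metadata queued for freeing is handed to ocfs2_run_deallocs() on the
 * way out.
 */
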
Mark Fashehccd979b2005-12-15 14:31:24 -08006998/*
Mark Fasheh59a5e412007-06-22 15:52:36 -07006999 * Expects the inode to already be locked.
Mark Fashehccd979b2005-12-15 14:31:24 -08007000 */
7001int ocfs2_prepare_truncate(struct ocfs2_super *osb,
7002 struct inode *inode,
7003 struct buffer_head *fe_bh,
7004 struct ocfs2_truncate_context **tc)
7005{
Mark Fasheh59a5e412007-06-22 15:52:36 -07007006 int status;
Mark Fashehccd979b2005-12-15 14:31:24 -08007007 unsigned int new_i_clusters;
7008 struct ocfs2_dinode *fe;
7009 struct ocfs2_extent_block *eb;
Mark Fashehccd979b2005-12-15 14:31:24 -08007010 struct buffer_head *last_eb_bh = NULL;
Mark Fashehccd979b2005-12-15 14:31:24 -08007011
7012 mlog_entry_void();
7013
7014 *tc = NULL;
7015
7016 new_i_clusters = ocfs2_clusters_for_bytes(osb->sb,
7017 i_size_read(inode));
7018 fe = (struct ocfs2_dinode *) fe_bh->b_data;
7019
7020	mlog(0, "fe->i_clusters = %u, new_i_clusters = %u, fe->i_size = "
Mark Fasheh1ca1a112007-04-27 16:01:25 -07007021 "%llu\n", le32_to_cpu(fe->i_clusters), new_i_clusters,
7022 (unsigned long long)le64_to_cpu(fe->i_size));
Mark Fashehccd979b2005-12-15 14:31:24 -08007023
Robert P. J. Daycd861282006-12-13 00:34:52 -08007024 *tc = kzalloc(sizeof(struct ocfs2_truncate_context), GFP_KERNEL);
Mark Fashehccd979b2005-12-15 14:31:24 -08007025 if (!(*tc)) {
7026 status = -ENOMEM;
7027 mlog_errno(status);
7028 goto bail;
7029 }
Mark Fasheh59a5e412007-06-22 15:52:36 -07007030 ocfs2_init_dealloc_ctxt(&(*tc)->tc_dealloc);
Mark Fashehccd979b2005-12-15 14:31:24 -08007031
Mark Fashehccd979b2005-12-15 14:31:24 -08007032 if (fe->id2.i_list.l_tree_depth) {
Mark Fashehccd979b2005-12-15 14:31:24 -08007033 status = ocfs2_read_block(osb, le64_to_cpu(fe->i_last_eb_blk),
7034 &last_eb_bh, OCFS2_BH_CACHED, inode);
7035 if (status < 0) {
7036 mlog_errno(status);
7037 goto bail;
7038 }
7039 eb = (struct ocfs2_extent_block *) last_eb_bh->b_data;
7040 if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) {
7041 OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb);
7042
7043 brelse(last_eb_bh);
7044 status = -EIO;
7045 goto bail;
7046 }
Mark Fashehccd979b2005-12-15 14:31:24 -08007047 }
7048
7049 (*tc)->tc_last_eb_bh = last_eb_bh;
7050
Mark Fashehccd979b2005-12-15 14:31:24 -08007051 status = 0;
7052bail:
7053 if (status < 0) {
7054 if (*tc)
7055 ocfs2_free_truncate_context(*tc);
7056 *tc = NULL;
7057 }
7058 mlog_exit_void();
7059 return status;
7060}
7061
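/*
 * ocfs2_prepare_truncate() above only sets up state: it allocates the
 * ocfs2_truncate_context, initializes its deallocation context and, for
 * trees with a nonzero depth, reads, validates and stashes the last
 * extent block.  Nothing on disk changes until ocfs2_commit_truncate().
 */
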
Mark Fasheh1afc32b2007-09-07 14:46:51 -07007062/*
7063 * 'start' is inclusive, 'end' is not.
7064 */
7065int ocfs2_truncate_inline(struct inode *inode, struct buffer_head *di_bh,
7066 unsigned int start, unsigned int end, int trunc)
7067{
7068 int ret;
7069 unsigned int numbytes;
7070 handle_t *handle;
7071 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
7072 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
7073 struct ocfs2_inline_data *idata = &di->id2.i_data;
7074
7075 if (end > i_size_read(inode))
7076 end = i_size_read(inode);
7077
7078 BUG_ON(start >= end);
7079
7080 if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) ||
7081 !(le16_to_cpu(di->i_dyn_features) & OCFS2_INLINE_DATA_FL) ||
7082 !ocfs2_supports_inline_data(osb)) {
7083 ocfs2_error(inode->i_sb,
7084 "Inline data flags for inode %llu don't agree! "
7085 "Disk: 0x%x, Memory: 0x%x, Superblock: 0x%x\n",
7086 (unsigned long long)OCFS2_I(inode)->ip_blkno,
7087 le16_to_cpu(di->i_dyn_features),
7088 OCFS2_I(inode)->ip_dyn_features,
7089 osb->s_feature_incompat);
7090 ret = -EROFS;
7091 goto out;
7092 }
7093
7094 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
7095 if (IS_ERR(handle)) {
7096 ret = PTR_ERR(handle);
7097 mlog_errno(ret);
7098 goto out;
7099 }
7100
7101 ret = ocfs2_journal_access(handle, inode, di_bh,
7102 OCFS2_JOURNAL_ACCESS_WRITE);
7103 if (ret) {
7104 mlog_errno(ret);
7105 goto out_commit;
7106 }
7107
7108 numbytes = end - start;
7109 memset(idata->id_data + start, 0, numbytes);
7110
7111 /*
7112 * No need to worry about the data page here - it's been
7113 * truncated already and inline data doesn't need it for
7114	 * pushing zeros to disk, so we'll let readpage pick it up
7115 * later.
7116 */
7117 if (trunc) {
7118 i_size_write(inode, start);
7119 di->i_size = cpu_to_le64(start);
7120 }
7121
7122 inode->i_blocks = ocfs2_inode_sector_count(inode);
7123 inode->i_ctime = inode->i_mtime = CURRENT_TIME;
7124
7125 di->i_ctime = di->i_mtime = cpu_to_le64(inode->i_ctime.tv_sec);
7126 di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
7127
7128 ocfs2_journal_dirty(handle, di_bh);
7129
7130out_commit:
7131 ocfs2_commit_trans(osb, handle);
7132
7133out:
7134 return ret;
7135}
7136
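/*
 * Illustrative sketch only (callers live elsewhere): shrinking an
 * inline-data inode to new_size would look roughly like
 *
 *	ret = ocfs2_truncate_inline(inode, di_bh, new_size,
 *				    i_size_read(inode), 1);
 *
 * i.e. zero from the new size up to the old one and let the trunc flag
 * pull i_size back, while punching a hole inside the inline area would
 * pass trunc == 0 so i_size is left alone.
 */
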
Mark Fashehccd979b2005-12-15 14:31:24 -08007137static void ocfs2_free_truncate_context(struct ocfs2_truncate_context *tc)
7138{
Mark Fasheh59a5e412007-06-22 15:52:36 -07007139 /*
7140 * The caller is responsible for completing deallocation
7141 * before freeing the context.
7142 */
7143 if (tc->tc_dealloc.c_first_suballocator != NULL)
7144 mlog(ML_NOTICE,
7145 "Truncate completion has non-empty dealloc context\n");
Mark Fashehccd979b2005-12-15 14:31:24 -08007146
7147 if (tc->tc_last_eb_bh)
7148 brelse(tc->tc_last_eb_bh);
7149
7150 kfree(tc);
7151}