/*
 *  linux/fs/ext3/balloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/config.h>
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd.h>
#include <linux/ext3_fs.h>
#include <linux/ext3_jbd.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>

/*
 * balloc.c contains the blocks allocation and deallocation routines
 */

/*
 * The free blocks are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the block.  The descriptors are loaded in memory
 * when a file system is mounted (see ext3_read_super).
 */
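/*
 * For illustration (not part of the original source): with 4096-byte
 * blocks, EXT3_BLOCKS_PER_GROUP() is 32768 and s_first_data_block is 0,
 * so a filesystem-wide block number maps to a group and an in-group bit
 * as
 *
 *        group = (block - first_data_block) / EXT3_BLOCKS_PER_GROUP(sb);
 *        bit   = (block - first_data_block) % EXT3_BLOCKS_PER_GROUP(sb);
 *
 * e.g. block 100000 lands in group 3 at bit 1696.  This is exactly the
 * arithmetic used by ext3_free_blocks_sb() and ext3_new_block() below.
 */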

#define in_range(b, first, len)        ((b) >= (first) && (b) <= (first) + (len) - 1)

struct ext3_group_desc * ext3_get_group_desc(struct super_block * sb,
                                             unsigned int block_group,
                                             struct buffer_head ** bh)
{
        unsigned long group_desc;
        unsigned long offset;
        struct ext3_group_desc * desc;
        struct ext3_sb_info *sbi = EXT3_SB(sb);

        if (block_group >= sbi->s_groups_count) {
                ext3_error (sb, "ext3_get_group_desc",
                            "block_group >= groups_count - "
                            "block_group = %d, groups_count = %lu",
                            block_group, sbi->s_groups_count);

                return NULL;
        }
        smp_rmb();

        group_desc = block_group >> EXT3_DESC_PER_BLOCK_BITS(sb);
        offset = block_group & (EXT3_DESC_PER_BLOCK(sb) - 1);
        if (!sbi->s_group_desc[group_desc]) {
                ext3_error (sb, "ext3_get_group_desc",
                            "Group descriptor not loaded - "
                            "block_group = %d, group_desc = %lu, desc = %lu",
                            block_group, group_desc, offset);
                return NULL;
        }

        desc = (struct ext3_group_desc *) sbi->s_group_desc[group_desc]->b_data;
        if (bh)
                *bh = sbi->s_group_desc[group_desc];
        return desc + offset;
}

/*
 * Read the bitmap for a given block_group, reading into the specified
 * slot in the superblock's bitmap cache.
 *
 * Return buffer_head on success or NULL in case of failure.
 */
static struct buffer_head *
read_block_bitmap(struct super_block *sb, unsigned int block_group)
{
        struct ext3_group_desc * desc;
        struct buffer_head * bh = NULL;

        desc = ext3_get_group_desc (sb, block_group, NULL);
        if (!desc)
                goto error_out;
        bh = sb_bread(sb, le32_to_cpu(desc->bg_block_bitmap));
        if (!bh)
                ext3_error (sb, "read_block_bitmap",
                            "Cannot read block bitmap - "
                            "block_group = %d, block_bitmap = %u",
                            block_group, le32_to_cpu(desc->bg_block_bitmap));
error_out:
        return bh;
}

/*
 * The reservation window structure operations
 * --------------------------------------------
 * Operations include:
 * dump, find, add, remove, is_empty, find_next_reservable_window, etc.
 *
 * The per-filesystem reservation windows are kept sorted by their start
 * block; the code below stores them in a red-black tree.
 *
 * We keep these small operations behind abstract helper functions, so
 * if we ever need a different search structure we can switch to it
 * without changing too much code.
 */
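/*
 * Illustrative sketch (not in the original source): reservation windows
 * are disjoint and ordered by start block.  With windows [10,19] and
 * [40,49] in the tree, search_reserve_window() returns the [10,19]
 * node both for goal 15 (goal inside the window) and for goal 25 (the
 * predecessor window), while ext3_rsv_window_add() would hit its BUG()
 * if asked to insert an overlapping window such as [15,30].
 */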
#if 0
static void __rsv_window_dump(struct rb_root *root, int verbose,
                              const char *fn)
{
        struct rb_node *n;
        struct ext3_reserve_window_node *rsv, *prev;
        int bad;

restart:
        n = rb_first(root);
        bad = 0;
        prev = NULL;

        printk("Block Allocation Reservation Windows Map (%s):\n", fn);
        while (n) {
                rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node);
                if (verbose)
                        printk("reservation window 0x%p "
                               "start:  %d, end:  %d\n",
                               rsv, rsv->rsv_start, rsv->rsv_end);
                if (rsv->rsv_start && rsv->rsv_start >= rsv->rsv_end) {
                        printk("Bad reservation %p (start >= end)\n",
                               rsv);
                        bad = 1;
                }
                if (prev && prev->rsv_end >= rsv->rsv_start) {
                        printk("Bad reservation %p (prev->end >= start)\n",
                               rsv);
                        bad = 1;
                }
                if (bad) {
                        if (!verbose) {
                                printk("Restarting reservation walk in verbose mode\n");
                                verbose = 1;
                                goto restart;
                        }
                }
                n = rb_next(n);
                prev = rsv;
        }
        printk("Window map complete.\n");
        if (bad)
                BUG();
}
#define rsv_window_dump(root, verbose) \
        __rsv_window_dump((root), (verbose), __FUNCTION__)
#else
#define rsv_window_dump(root, verbose) do {} while (0)
#endif

static int
goal_in_my_reservation(struct ext3_reserve_window *rsv, int goal,
                       unsigned int group, struct super_block * sb)
{
        unsigned long group_first_block, group_last_block;

        group_first_block = le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block) +
                            group * EXT3_BLOCKS_PER_GROUP(sb);
        group_last_block = group_first_block + EXT3_BLOCKS_PER_GROUP(sb) - 1;

        if ((rsv->_rsv_start > group_last_block) ||
            (rsv->_rsv_end < group_first_block))
                return 0;
        if ((goal >= 0) && ((goal + group_first_block < rsv->_rsv_start)
                || (goal + group_first_block > rsv->_rsv_end)))
                return 0;
        return 1;
}

/*
 * Find the reserved window which includes the goal, or the previous one
 * if the goal is not in any window.
 * Returns NULL if there are no windows or if all windows start after the goal.
 */
static struct ext3_reserve_window_node *
search_reserve_window(struct rb_root *root, unsigned long goal)
{
        struct rb_node *n = root->rb_node;
        struct ext3_reserve_window_node *rsv;

        if (!n)
                return NULL;

        do {
                rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node);

                if (goal < rsv->rsv_start)
                        n = n->rb_left;
                else if (goal > rsv->rsv_end)
                        n = n->rb_right;
                else
                        return rsv;
        } while (n);
        /*
         * We've fallen off the end of the tree: the goal wasn't inside
         * any particular node.  OK, the previous node must be to one
         * side of the interval containing the goal.  If it's the RHS,
         * we need to back up one.
         */
        if (rsv->rsv_start > goal) {
                n = rb_prev(&rsv->rsv_node);
                rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node);
        }
        return rsv;
}

void ext3_rsv_window_add(struct super_block *sb,
                         struct ext3_reserve_window_node *rsv)
{
        struct rb_root *root = &EXT3_SB(sb)->s_rsv_window_root;
        struct rb_node *node = &rsv->rsv_node;
        unsigned int start = rsv->rsv_start;

        struct rb_node ** p = &root->rb_node;
        struct rb_node * parent = NULL;
        struct ext3_reserve_window_node *this;

        while (*p) {
                parent = *p;
                this = rb_entry(parent, struct ext3_reserve_window_node,
                                rsv_node);

                if (start < this->rsv_start)
                        p = &(*p)->rb_left;
                else if (start > this->rsv_end)
                        p = &(*p)->rb_right;
                else
                        BUG();
        }

        rb_link_node(node, parent, p);
        rb_insert_color(node, root);
}

static void rsv_window_remove(struct super_block *sb,
                              struct ext3_reserve_window_node *rsv)
{
        rsv->rsv_start = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
        rsv->rsv_end = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
        rsv->rsv_alloc_hit = 0;
        rb_erase(&rsv->rsv_node, &EXT3_SB(sb)->s_rsv_window_root);
}

static inline int rsv_is_empty(struct ext3_reserve_window *rsv)
{
        /* a valid reservation end block cannot be 0 */
        return (rsv->_rsv_end == EXT3_RESERVE_WINDOW_NOT_ALLOCATED);
}

void ext3_init_block_alloc_info(struct inode *inode)
{
        struct ext3_inode_info *ei = EXT3_I(inode);
        struct ext3_block_alloc_info *block_i;
        struct super_block *sb = inode->i_sb;

        block_i = kmalloc(sizeof(*block_i), GFP_NOFS);
        if (block_i) {
                struct ext3_reserve_window_node *rsv = &block_i->rsv_window_node;

                rsv->rsv_start = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
                rsv->rsv_end = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;

                /*
                 * if the filesystem is mounted with NORESERVATION, the goal
                 * reservation window size is set to zero to indicate that
                 * block reservation is off
                 */
                if (!test_opt(sb, RESERVATION))
                        rsv->rsv_goal_size = 0;
                else
                        rsv->rsv_goal_size = EXT3_DEFAULT_RESERVE_BLOCKS;
                rsv->rsv_alloc_hit = 0;
                block_i->last_alloc_logical_block = 0;
                block_i->last_alloc_physical_block = 0;
        }
        ei->i_block_alloc_info = block_i;
}

void ext3_discard_reservation(struct inode *inode)
{
        struct ext3_inode_info *ei = EXT3_I(inode);
        struct ext3_block_alloc_info *block_i = ei->i_block_alloc_info;
        struct ext3_reserve_window_node *rsv;
        spinlock_t *rsv_lock = &EXT3_SB(inode->i_sb)->s_rsv_window_lock;

        if (!block_i)
                return;

        rsv = &block_i->rsv_window_node;
        if (!rsv_is_empty(&rsv->rsv_window)) {
                spin_lock(rsv_lock);
                if (!rsv_is_empty(&rsv->rsv_window))
                        rsv_window_remove(inode->i_sb, rsv);
                spin_unlock(rsv_lock);
        }
}
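/*
 * Note on the double check above (illustrative explanation, not from
 * the original source): rsv_is_empty() is tested once without the lock
 * so the common no-window case never takes the filesystem-wide
 * s_rsv_window_lock at all, and then re-tested under the lock because
 * the window may have been removed in the meantime.
 */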

/* Free given blocks, update quota and i_blocks field */
void ext3_free_blocks_sb(handle_t *handle, struct super_block *sb,
                         unsigned long block, unsigned long count,
                         int *pdquot_freed_blocks)
{
        struct buffer_head *bitmap_bh = NULL;
        struct buffer_head *gd_bh;
        unsigned long block_group;
        unsigned long bit;
        unsigned long i;
        unsigned long overflow;
        struct ext3_group_desc * desc;
        struct ext3_super_block * es;
        struct ext3_sb_info *sbi;
        int err = 0, ret;
        unsigned group_freed;

        *pdquot_freed_blocks = 0;
        sbi = EXT3_SB(sb);
        es = sbi->s_es;
        if (block < le32_to_cpu(es->s_first_data_block) ||
            block + count < block ||
            block + count > le32_to_cpu(es->s_blocks_count)) {
                ext3_error (sb, "ext3_free_blocks",
                            "Freeing blocks not in datazone - "
                            "block = %lu, count = %lu", block, count);
                goto error_return;
        }

        ext3_debug ("freeing block(s) %lu-%lu\n", block, block + count - 1);

do_more:
        overflow = 0;
        block_group = (block - le32_to_cpu(es->s_first_data_block)) /
                      EXT3_BLOCKS_PER_GROUP(sb);
        bit = (block - le32_to_cpu(es->s_first_data_block)) %
              EXT3_BLOCKS_PER_GROUP(sb);
        /*
         * Check to see if we are freeing blocks across a group
         * boundary.
         */
        if (bit + count > EXT3_BLOCKS_PER_GROUP(sb)) {
                overflow = bit + count - EXT3_BLOCKS_PER_GROUP(sb);
                count -= overflow;
        }
        brelse(bitmap_bh);
        bitmap_bh = read_block_bitmap(sb, block_group);
        if (!bitmap_bh)
                goto error_return;
        desc = ext3_get_group_desc (sb, block_group, &gd_bh);
        if (!desc)
                goto error_return;

        if (in_range (le32_to_cpu(desc->bg_block_bitmap), block, count) ||
            in_range (le32_to_cpu(desc->bg_inode_bitmap), block, count) ||
            in_range (block, le32_to_cpu(desc->bg_inode_table),
                      sbi->s_itb_per_group) ||
            in_range (block + count - 1, le32_to_cpu(desc->bg_inode_table),
                      sbi->s_itb_per_group))
                ext3_error (sb, "ext3_free_blocks",
                            "Freeing blocks in system zones - "
                            "Block = %lu, count = %lu",
                            block, count);

        /*
         * We are about to start releasing blocks in the bitmap,
         * so we need undo access.
         */
        /* @@@ check errors */
        BUFFER_TRACE(bitmap_bh, "getting undo access");
        err = ext3_journal_get_undo_access(handle, bitmap_bh);
        if (err)
                goto error_return;

        /*
         * We are about to modify some metadata.  Call the journal APIs
         * to unshare ->b_data if a currently-committing transaction is
         * using it
         */
        BUFFER_TRACE(gd_bh, "get_write_access");
        err = ext3_journal_get_write_access(handle, gd_bh);
        if (err)
                goto error_return;

        jbd_lock_bh_state(bitmap_bh);

        for (i = 0, group_freed = 0; i < count; i++) {
                /*
                 * An HJ special.  This is expensive...
                 */
#ifdef CONFIG_JBD_DEBUG
                jbd_unlock_bh_state(bitmap_bh);
                {
                        struct buffer_head *debug_bh;
                        debug_bh = sb_find_get_block(sb, block + i);
                        if (debug_bh) {
                                BUFFER_TRACE(debug_bh, "Deleted!");
                                if (!bh2jh(bitmap_bh)->b_committed_data)
                                        BUFFER_TRACE(debug_bh,
                                                "No committed data in bitmap");
                                BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap");
                                __brelse(debug_bh);
                        }
                }
                jbd_lock_bh_state(bitmap_bh);
#endif
                if (need_resched()) {
                        jbd_unlock_bh_state(bitmap_bh);
                        cond_resched();
                        jbd_lock_bh_state(bitmap_bh);
                }
                /* @@@ This prevents newly-allocated data from being
                 * freed and then reallocated within the same
                 * transaction.
                 *
                 * Ideally we would want to allow that to happen, but to
                 * do so requires making journal_forget() capable of
                 * revoking the queued write of a data block, which
                 * implies blocking on the journal lock.  *forget()
                 * cannot block due to truncate races.
                 *
                 * Eventually we can fix this by making journal_forget()
                 * return a status indicating whether or not it was able
                 * to revoke the buffer.  On successful revoke, it is
                 * safe not to set the allocation bit in the committed
                 * bitmap, because we know that there is no outstanding
                 * activity on the buffer any more and so it is safe to
                 * reallocate it.
                 */
                BUFFER_TRACE(bitmap_bh, "set in b_committed_data");
                J_ASSERT_BH(bitmap_bh,
                            bh2jh(bitmap_bh)->b_committed_data != NULL);
                ext3_set_bit_atomic(sb_bgl_lock(sbi, block_group), bit + i,
                                    bh2jh(bitmap_bh)->b_committed_data);

                /*
                 * We clear the bit in the bitmap after setting the committed
                 * data bit, because this is the reverse order to that which
                 * the allocator uses.
                 */
                BUFFER_TRACE(bitmap_bh, "clear bit");
                if (!ext3_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
                                           bit + i, bitmap_bh->b_data)) {
                        jbd_unlock_bh_state(bitmap_bh);
                        ext3_error(sb, __FUNCTION__,
                                   "bit already cleared for block %lu",
                                   block + i);
                        jbd_lock_bh_state(bitmap_bh);
                        BUFFER_TRACE(bitmap_bh, "bit already cleared");
                } else {
                        group_freed++;
                }
        }
        jbd_unlock_bh_state(bitmap_bh);

        spin_lock(sb_bgl_lock(sbi, block_group));
        desc->bg_free_blocks_count =
                cpu_to_le16(le16_to_cpu(desc->bg_free_blocks_count) +
                            group_freed);
        spin_unlock(sb_bgl_lock(sbi, block_group));
        percpu_counter_mod(&sbi->s_freeblocks_counter, count);

        /* We dirtied the bitmap block */
        BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
        err = ext3_journal_dirty_metadata(handle, bitmap_bh);

        /* And the group descriptor block */
        BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
        ret = ext3_journal_dirty_metadata(handle, gd_bh);
        if (!err)
                err = ret;
        *pdquot_freed_blocks += group_freed;

        if (overflow && !err) {
                block += count;
                count = overflow;
                goto do_more;
        }
        sb->s_dirt = 1;
error_return:
        brelse(bitmap_bh);
        ext3_std_error(sb, err);
        return;
}

/* Free given blocks, update quota and i_blocks field */
void ext3_free_blocks(handle_t *handle, struct inode *inode,
                      unsigned long block, unsigned long count)
{
        struct super_block * sb;
        int dquot_freed_blocks;

        sb = inode->i_sb;
        if (!sb) {
                printk ("ext3_free_blocks: nonexistent device");
                return;
        }
        ext3_free_blocks_sb(handle, sb, block, count, &dquot_freed_blocks);
        if (dquot_freed_blocks)
                DQUOT_FREE_BLOCK(inode, dquot_freed_blocks);
        return;
}

/*
 * For ext3 allocations, we must not reuse any blocks which are
 * allocated in the bitmap buffer's "last committed data" copy.  This
 * prevents deletes from freeing up the page for reuse until we have
 * committed the delete transaction.
 *
 * If we didn't do this, then deleting something and reallocating it as
 * data would allow the old block to be overwritten before the
 * transaction committed (because we force data to disk before commit).
 * This would lead to corruption if we crashed between overwriting the
 * data and committing the delete.
 *
 * @@@ We may want to make this allocation behaviour conditional on
 * data-writes at some point, and disable it for metadata allocations or
 * sync-data inodes.
 */
static int ext3_test_allocatable(int nr, struct buffer_head *bh)
{
        int ret;
        struct journal_head *jh = bh2jh(bh);

        if (ext3_test_bit(nr, bh->b_data))
                return 0;

        jbd_lock_bh_state(bh);
        if (!jh->b_committed_data)
                ret = 1;
        else
                ret = !ext3_test_bit(nr, jh->b_committed_data);
        jbd_unlock_bh_state(bh);
        return ret;
}
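/*
 * Example (illustration only, not from the original source): suppose a
 * block was freed earlier in the current transaction.
 * ext3_free_blocks_sb() cleared its bit in bh->b_data but set it in
 * jh->b_committed_data, so ext3_test_allocatable() returns 0 and the
 * block cannot be reused until the transaction commits.
 */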

static int
bitmap_search_next_usable_block(int start, struct buffer_head *bh,
                                int maxblocks)
{
        int next;
        struct journal_head *jh = bh2jh(bh);

        /*
         * The bitmap search --- search forward alternately through the actual
         * bitmap and the last-committed copy until we find a bit free in
         * both
         */
        while (start < maxblocks) {
                next = ext3_find_next_zero_bit(bh->b_data, maxblocks, start);
                if (next >= maxblocks)
                        return -1;
                if (ext3_test_allocatable(next, bh))
                        return next;
                jbd_lock_bh_state(bh);
                if (jh->b_committed_data)
                        start = ext3_find_next_zero_bit(jh->b_committed_data,
                                                        maxblocks, next);
                jbd_unlock_bh_state(bh);
        }
        return -1;
}

/*
 * Find an allocatable block in a bitmap.  We honour both the bitmap and
 * its last-committed copy (if that exists), and perform the "most
 * appropriate allocation" algorithm of looking for a free block near
 * the initial goal; then for a free byte somewhere in the bitmap; then
 * for any free bit in the bitmap.
 */
static int
find_next_usable_block(int start, struct buffer_head *bh, int maxblocks)
{
        int here, next;
        char *p, *r;

        if (start > 0) {
                /*
                 * The goal was occupied; search forward for a free
                 * block within the next XX blocks.
                 *
                 * end_goal is more or less random, but it has to be
                 * less than EXT3_BLOCKS_PER_GROUP. Aligning up to the
                 * next 64-bit boundary is simple..
                 */
                int end_goal = (start + 63) & ~63;
                if (end_goal > maxblocks)
                        end_goal = maxblocks;
                here = ext3_find_next_zero_bit(bh->b_data, end_goal, start);
                if (here < end_goal && ext3_test_allocatable(here, bh))
                        return here;
                ext3_debug("Bit not found near goal\n");
        }

        here = start;
        if (here < 0)
                here = 0;

        p = ((char *)bh->b_data) + (here >> 3);
        r = memscan(p, 0, (maxblocks - here + 7) >> 3);
        next = (r - ((char *)bh->b_data)) << 3;

        if (next < maxblocks && next >= start && ext3_test_allocatable(next, bh))
                return next;

        /*
         * The bitmap search --- search forward alternately through the actual
         * bitmap and the last-committed copy until we find a bit free in
         * both
         */
        here = bitmap_search_next_usable_block(here, bh, maxblocks);
        return here;
}

/*
 * We think we can allocate this block in this bitmap.  Try to set the bit.
 * If that succeeds then check that nobody has allocated and then freed the
 * block since we saw that it was not marked in b_committed_data.  If it _was_
 * allocated and freed then clear the bit in the bitmap again and return
 * zero (failure).
 */
static inline int
claim_block(spinlock_t *lock, int block, struct buffer_head *bh)
{
        struct journal_head *jh = bh2jh(bh);
        int ret;

        if (ext3_set_bit_atomic(lock, block, bh->b_data))
                return 0;
        jbd_lock_bh_state(bh);
        if (jh->b_committed_data && ext3_test_bit(block, jh->b_committed_data)) {
                ext3_clear_bit_atomic(lock, block, bh->b_data);
                ret = 0;
        } else {
                ret = 1;
        }
        jbd_unlock_bh_state(bh);
        return ret;
}
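/*
 * Illustrative race (not from the original source): two tasks race to
 * claim bit 12.  ext3_set_bit_atomic() returns the old bit value, so
 * exactly one caller sees 0 and wins; the loser gets back 1, returns 0
 * and moves on to another block.  The winner then re-checks
 * b_committed_data under jbd_lock_bh_state() and backs out if the block
 * was allocated and freed again since the last commit.
 */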

/*
 * If we failed to allocate the desired block then we may end up crossing to a
 * new bitmap.  In that case we must release write access to the old one via
 * ext3_journal_release_buffer(), else we'll run out of credits.
 */
static int
ext3_try_to_allocate(struct super_block *sb, handle_t *handle, int group,
                     struct buffer_head *bitmap_bh, int goal,
                     struct ext3_reserve_window *my_rsv)
{
        int group_first_block, start, end;

        /* we do allocation within the reservation window if we have a window */
        if (my_rsv) {
                group_first_block =
                        le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block) +
                        group * EXT3_BLOCKS_PER_GROUP(sb);
                if (my_rsv->_rsv_start >= group_first_block)
                        start = my_rsv->_rsv_start - group_first_block;
                else
                        /* reservation window crosses group boundary */
                        start = 0;
                end = my_rsv->_rsv_end - group_first_block + 1;
                if (end > EXT3_BLOCKS_PER_GROUP(sb))
                        /* reservation window crosses group boundary */
                        end = EXT3_BLOCKS_PER_GROUP(sb);
                if ((start <= goal) && (goal < end))
                        start = goal;
                else
                        goal = -1;
        } else {
                if (goal > 0)
                        start = goal;
                else
                        start = 0;
                end = EXT3_BLOCKS_PER_GROUP(sb);
        }

        BUG_ON(start > EXT3_BLOCKS_PER_GROUP(sb));

repeat:
        if (goal < 0 || !ext3_test_allocatable(goal, bitmap_bh)) {
                goal = find_next_usable_block(start, bitmap_bh, end);
                if (goal < 0)
                        goto fail_access;
                if (!my_rsv) {
                        int i;

                        for (i = 0; i < 7 && goal > start &&
                                        ext3_test_allocatable(goal - 1,
                                                              bitmap_bh);
                             i++, goal--)
                                ;
                }
        }
        start = goal;

        if (!claim_block(sb_bgl_lock(EXT3_SB(sb), group), goal, bitmap_bh)) {
                /*
                 * The block was allocated by another thread, or it was
                 * allocated and then freed by another thread
                 */
                start++;
                goal++;
                if (start >= end)
                        goto fail_access;
                goto repeat;
        }
        return goal;
fail_access:
        return -1;
}

/**
 *      find_next_reservable_window():
 *              find a reservable space within the given range.
 *              It does not allocate the reservation window for now:
 *              alloc_new_reservation() will do the work later.
 *
 *      @search_head: the head of the searching list;
 *              This is not necessarily the list head of the whole filesystem.
 *
 *              We have both head and start_block to assist the search
 *              for the reservable space. The list starts from head,
 *              but we will shift to the place where start_block is,
 *              then start from there, when looking for a reservable space.
 *
 *      @size: the target new reservation window size
 *
 *      @group_first_block: the first block we consider to start
 *              the real search from
 *
 *      @last_block:
 *              the maximum block number that our goal reservable space
 *              could start from. This is normally the last block in this
 *              group. The search ends once the start of the next possible
 *              reservable space is beyond this boundary.
 *              This handles requests for reservation windows that cross
 *              the group boundary.
 *
 *      Basically we search the given range (start_block, last_block),
 *      rather than the whole reservation list, to find a free region
 *      that is of the requested size and has not yet been reserved.
 *
 *      On success it returns the reservation window to append to;
 *      on failure it returns NULL.
 */
static struct ext3_reserve_window_node *find_next_reservable_window(
                                struct ext3_reserve_window_node *search_head,
                                unsigned long size, int *start_block,
                                int last_block)
{
        struct rb_node *next;
        struct ext3_reserve_window_node *rsv, *prev;
        int cur;

        /* TODO: make the start of the reservation window byte-aligned */
        /* cur = *start_block & ~7;*/
        cur = *start_block;
        rsv = search_head;
        if (!rsv)
                return NULL;

        while (1) {
                if (cur <= rsv->rsv_end)
                        cur = rsv->rsv_end + 1;

                /* TODO?
                 * in the case we could not find a reservable space
                 * of the expected size, during the re-search, we could
                 * remember the largest reservable space we found
                 * and return that one.
                 *
                 * For now it fails if we could not find a reservable
                 * space of the expected size (or more)...
                 */
                if (cur > last_block)
                        return NULL;            /* fail */

                prev = rsv;
                next = rb_next(&rsv->rsv_node);
                rsv = rb_entry(next, struct ext3_reserve_window_node, rsv_node);

                /*
                 * Reached the last reservation, we can just append to the
                 * previous one.
                 */
                if (!next)
                        break;

                if (cur + size <= rsv->rsv_start) {
                        /*
                         * Found a reservable space big enough.  We could
                         * have a reservation across the group boundary here
                         */
                        break;
                }
        }
        /*
         * We come here either when we reach the end of the whole list
         * and there is empty reservable space after the last entry, in
         * which case we append to the end of the list, or when we find
         * a reservable space in the middle of the list, in which case
         * we return the reservation window that we can append to.
         */
        *start_block = cur;
        return prev;
}
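/*
 * Worked example (illustration only, not from the original source):
 * windows [10,19] and [40,49] are reserved, *start_block is 12 and
 * size is 8.  cur first advances past the [10,19] window to 20; since
 * 20 + 8 <= 40 the gap before [40,49] is big enough, so the function
 * sets *start_block to 20 and returns the [10,19] node, i.e. the
 * window after which the new reservation should be inserted.
 */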

/**
 *      alloc_new_reservation()--allocate a new reservation window
 *
 *              To make a new reservation, we search part of the filesystem
 *              reservation list (the part inside the group). We try to
 *              allocate a new reservation window near the allocation goal,
 *              or the beginning of the group, if there is no goal.
 *
 *              We first find a reservable space after the goal, then from
 *              there, we check the bitmap for the first free block after
 *              it. If there is no free block until the end of group, then the
 *              whole group is full and we fail. Otherwise, check if the free
 *              block is inside the expected reservable space; if so, we
 *              succeed.
 *              If the first free block is outside the reservable space, then
 *              starting from the first free block, we search for the next
 *              available space, and go on.
 *
 *      On success, a new reservation will be found and inserted into the
 *      list.  It contains at least one free block, and it does not overlap
 *      with other reservation windows.
 *
 *      On failure, we failed to find a reservation window in this group.
 *
 *      @rsv: the reservation
 *
 *      @goal: The goal (group-relative).  It is where the search for a
 *              free reservable space should start from.
 *              If we have a goal (goal > 0), then start from there;
 *              with no goal (goal = -1), we start from the first block
 *              of the group.
 *
 *      @sb: the super block
 *      @group: the group we are trying to allocate in
 *      @bitmap_bh: the block group block bitmap
 */
static int alloc_new_reservation(struct ext3_reserve_window_node *my_rsv,
                                 int goal, struct super_block *sb,
                                 unsigned int group, struct buffer_head *bitmap_bh)
{
        struct ext3_reserve_window_node *search_head;
        int group_first_block, group_end_block, start_block;
        int first_free_block;
        int reservable_space_start;
        struct ext3_reserve_window_node *prev_rsv;
        struct rb_root *fs_rsv_root = &EXT3_SB(sb)->s_rsv_window_root;
        unsigned long size;

        group_first_block = le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block) +
                            group * EXT3_BLOCKS_PER_GROUP(sb);
        group_end_block = group_first_block + EXT3_BLOCKS_PER_GROUP(sb) - 1;

        if (goal < 0)
                start_block = group_first_block;
        else
                start_block = goal + group_first_block;

        size = my_rsv->rsv_goal_size;
        if (!rsv_is_empty(&my_rsv->rsv_window)) {
                /*
                 * if the old reservation crosses the group boundary
                 * and the goal is inside the old reservation window,
                 * we will come here when we just failed to allocate from
                 * the first part of the window. We still have another part
                 * that belongs to the next group. In this case, there is no
                 * point in discarding our window and trying to allocate a
                 * new one in this group (which will fail): we should keep
                 * the reservation window and simply move on.
                 *
                 * Maybe we could shift the start block of the reservation
                 * window to the first block of the next group.
                 */

                if ((my_rsv->rsv_start <= group_end_block) &&
                    (my_rsv->rsv_end > group_end_block) &&
                    (start_block >= my_rsv->rsv_start))
                        return -1;

                if ((my_rsv->rsv_alloc_hit >
                     (my_rsv->rsv_end - my_rsv->rsv_start + 1) / 2)) {
                        /*
                         * if the previous allocation hit ratio is greater
                         * than half, we double the size of the reservation
                         * window the next time; otherwise we keep it the same
                         */
                        size = size * 2;
                        if (size > EXT3_MAX_RESERVE_BLOCKS)
                                size = EXT3_MAX_RESERVE_BLOCKS;
                        my_rsv->rsv_goal_size = size;
                }
        }
        /*
         * shift the search start to the window near the goal block
         */
        search_head = search_reserve_window(fs_rsv_root, start_block);

        /*
         * find_next_reservable_window() simply finds a reservable window
         * inside the given range (start_block, group_end_block).
         *
         * To make sure the reservation window has a free bit inside it, we
         * need to check the bitmap after we found a reservable window.
         */
retry:
        prev_rsv = find_next_reservable_window(search_head, size,
                                               &start_block, group_end_block);
        if (prev_rsv == NULL)
                goto failed;
        reservable_space_start = start_block;
        /*
         * On success, find_next_reservable_window() returns the
         * reservation window with a reservable space after it.
         * Before we reserve this reservable space, we need
         * to make sure there is at least one free block inside this region.
         *
         * Search the block bitmap and the copy of the last committed
         * bitmap alternately for the first free bit, until we find an
         * allocatable block.  The search starts from the start block of
         * the reservable space we just found.
         */
        first_free_block = bitmap_search_next_usable_block(
                        reservable_space_start - group_first_block,
                        bitmap_bh, group_end_block - group_first_block + 1);

        if (first_free_block < 0) {
                /*
                 * no free block left on the bitmap, so there is no point
                 * in reserving the space: return failure.
                 */
                goto failed;
        }
        start_block = first_free_block + group_first_block;
        /*
         * check if the first free block is within the
         * free space we just found
         */
        if ((start_block >= reservable_space_start) &&
            (start_block < reservable_space_start + size))
                goto found_rsv_window;
        /*
         * if the first free bit we found is out of the reservable space,
         * there is no free block in the reservable space: continue the
         * search for the next reservable space, starting from where the
         * free block is; we also shift the list head to where we stopped
         * last time
         */
        search_head = prev_rsv;
        goto retry;

found_rsv_window:
        /*
         * great! the reservable space contains some free blocks.
         * If the search says the new window should go just next to the
         * old window, we don't need to remove the old window and re-add
         * it to the same place; just update the new start and new end.
         */
        if (my_rsv != prev_rsv) {
                if (!rsv_is_empty(&my_rsv->rsv_window))
                        rsv_window_remove(sb, my_rsv);
        }
        my_rsv->rsv_start = reservable_space_start;
        my_rsv->rsv_end = my_rsv->rsv_start + size - 1;
        my_rsv->rsv_alloc_hit = 0;
        if (my_rsv != prev_rsv) {
                ext3_rsv_window_add(sb, my_rsv);
        }
        return 0;               /* succeed */
failed:
        /*
         * failed to find a new reservation window in the current
         * group, so remove the current (stale) reservation window
         * if there is any
         */
        if (!rsv_is_empty(&my_rsv->rsv_window))
                rsv_window_remove(sb, my_rsv);
        return -1;              /* failed */
}

/*
 * This is the main function used to allocate a new block and its reservation
 * window.
 *
 * Each time a new block allocation is needed, we first try to allocate from
 * the inode's own reservation.  If the inode does not have a reservation
 * window yet, then instead of first looking for a free bit in the bitmap
 * and then checking the reservation list to see whether that bit falls
 * inside somebody else's window, we try to allocate a reservation window
 * for the inode starting from the goal, and then do the block allocation
 * within that window.
 *
 * This avoids searching the reservation list again and again when somebody
 * is looking for a free block (without a reservation) and there are lots of
 * free blocks but they are all being reserved.
 *
 * The per-filesystem reservation windows are kept sorted by start block
 * (in a red-black tree; see ext3_rsv_window_add() above), so the insert,
 * remove and "find a free non-reserved space" operations are all fast.
 */
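/*
 * Control-flow sketch of ext3_try_to_allocate_with_rsv() below
 * (illustration only, not from the original source):
 *
 *        while (1) {
 *                if (no window yet || last attempt failed ||
 *                    goal outside the window)
 *                        alloc_new_reservation();   // may move the window
 *                try to allocate inside the window's span in this group;
 *                if (a block was claimed)
 *                        break;                     // success
 *        }
 */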
static int
ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
                              unsigned int group, struct buffer_head *bitmap_bh,
                              int goal, struct ext3_reserve_window_node * my_rsv,
                              int *errp)
{
        spinlock_t *rsv_lock;
        unsigned long group_first_block;
        int ret = 0;
        int fatal;

        *errp = 0;

        /*
         * Make sure we use undo access for the bitmap, because it is critical
         * that we do the frozen_data COW on bitmap buffers in all cases even
         * if the buffer is in BJ_Forget state in the committing transaction.
         */
        BUFFER_TRACE(bitmap_bh, "get undo access for new block");
        fatal = ext3_journal_get_undo_access(handle, bitmap_bh);
        if (fatal) {
                *errp = fatal;
                return -1;
        }

        /*
         * we don't deal with reservations when
         * the filesystem is mounted without reservations,
         * or the file is not a regular file,
         * or the last attempt to allocate a block with reservations on failed
         */
        if (my_rsv == NULL) {
                ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh,
                                           goal, NULL);
                goto out;
        }
        rsv_lock = &EXT3_SB(sb)->s_rsv_window_lock;
        /*
         * goal is a group-relative block number (if there is a goal):
         * 0 < goal < EXT3_BLOCKS_PER_GROUP(sb)
         * group_first_block is a filesystem-wide block number: the number
         * of the first block in this group
         */
        group_first_block = le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block) +
                            group * EXT3_BLOCKS_PER_GROUP(sb);

        /*
         * Basically we will allocate a new block from the inode's reservation
         * window.
         *
         * We need to allocate a new reservation window, if:
         * a) the inode does not have a reservation window; or
         * b) the last attempt to allocate a block from the existing
         *    reservation failed; or
         * c) we come here with a goal and with a reservation window
         *
         * We do not need to allocate a new reservation window if we come here
         * at the beginning with a goal and the goal is inside the window, or
         * we don't have a goal but already have a reservation window;
         * then we can go on to allocate from the reservation window directly.
         */
        while (1) {
                struct ext3_reserve_window rsv_copy;

                rsv_copy._rsv_start = my_rsv->rsv_start;
                rsv_copy._rsv_end = my_rsv->rsv_end;

                if (rsv_is_empty(&rsv_copy) || (ret < 0) ||
                    !goal_in_my_reservation(&rsv_copy, goal, group, sb)) {
                        spin_lock(rsv_lock);
                        ret = alloc_new_reservation(my_rsv, goal, sb,
                                                    group, bitmap_bh);
                        rsv_copy._rsv_start = my_rsv->rsv_start;
                        rsv_copy._rsv_end = my_rsv->rsv_end;
                        spin_unlock(rsv_lock);
                        if (ret < 0)
                                break;                  /* failed */

                        if (!goal_in_my_reservation(&rsv_copy, goal, group, sb))
                                goal = -1;
                }
                if ((rsv_copy._rsv_start >=
                     group_first_block + EXT3_BLOCKS_PER_GROUP(sb)) ||
                    (rsv_copy._rsv_end < group_first_block))
                        BUG();
                ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh, goal,
                                           &rsv_copy);
                if (ret >= 0) {
                        my_rsv->rsv_alloc_hit++;
                        break;                          /* succeed */
                }
        }
out:
        if (ret >= 0) {
                BUFFER_TRACE(bitmap_bh, "journal_dirty_metadata for "
                             "bitmap block");
                fatal = ext3_journal_dirty_metadata(handle, bitmap_bh);
                if (fatal) {
                        *errp = fatal;
                        return -1;
                }
                return ret;
        }

        BUFFER_TRACE(bitmap_bh, "journal_release_buffer");
        ext3_journal_release_buffer(handle, bitmap_bh);
        return ret;
}

static int ext3_has_free_blocks(struct ext3_sb_info *sbi)
{
        int free_blocks, root_blocks;

        free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
        root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
        if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
            sbi->s_resuid != current->fsuid &&
            (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
                return 0;
        }
        return 1;
}
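/*
 * Example (illustration only, not from the original source): with
 * s_r_blocks_count = 1000, an unprivileged task whose fsuid does not
 * match s_resuid (and which is not in the s_resgid group) is refused
 * once free_blocks drops below 1001, while root or a CAP_SYS_RESOURCE
 * holder may keep allocating out of the reserved blocks.
 */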

/*
 * ext3_should_retry_alloc() is called when ENOSPC is returned, and if
 * it is profitable to retry the operation, this function will wait
 * for the current or committing transaction to complete, and then
 * return TRUE.
 */
int ext3_should_retry_alloc(struct super_block *sb, int *retries)
{
        if (!ext3_has_free_blocks(EXT3_SB(sb)) || (*retries)++ > 3)
                return 0;

        jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);

        return journal_force_commit_nested(EXT3_SB(sb)->s_journal);
}
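/*
 * Typical caller pattern (illustrative sketch, not from this file):
 *
 *        int err, retries = 0;
 * retry:
 *        block = ext3_new_block(handle, inode, goal, &err);
 *        if (err == -ENOSPC &&
 *            ext3_should_retry_alloc(inode->i_sb, &retries))
 *                goto retry;
 *
 * Blocks freed but not yet committed become usable again after the
 * forced commit, which is why retrying can succeed.
 */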

/*
 * ext3_new_block uses a goal block to assist allocation.  If the goal is
 * free, or there is a free block within 32 blocks of the goal, that block
 * is allocated.  Otherwise a forward search is made for a free block; within
 * each block group the search first looks for an entire free byte in the block
 * bitmap, and then for any free bit if that fails.
 * This function also updates quota and i_blocks field.
 */
int ext3_new_block(handle_t *handle, struct inode *inode,
                   unsigned long goal, int *errp)
{
        struct buffer_head *bitmap_bh = NULL;
        struct buffer_head *gdp_bh;
        int group_no;
        int goal_group;
        int ret_block;
        int bgi;                        /* blockgroup iteration index */
        int target_block;
        int fatal = 0, err;
        int performed_allocation = 0;
        int free_blocks;
        struct super_block *sb;
        struct ext3_group_desc *gdp;
        struct ext3_super_block *es;
        struct ext3_sb_info *sbi;
        struct ext3_reserve_window_node *my_rsv = NULL;
        struct ext3_block_alloc_info *block_i;
        unsigned short windowsz = 0;
#ifdef EXT3FS_DEBUG
        static int goal_hits, goal_attempts;
#endif
        unsigned long ngroups;

        *errp = -ENOSPC;
        sb = inode->i_sb;
        if (!sb) {
                printk("ext3_new_block: nonexistent device");
                return 0;
        }

        /*
         * Check quota for allocation of this block.
         */
        if (DQUOT_ALLOC_BLOCK(inode, 1)) {
                *errp = -EDQUOT;
                return 0;
        }

        sbi = EXT3_SB(sb);
        es = EXT3_SB(sb)->s_es;
        ext3_debug("goal=%lu.\n", goal);
        /*
         * Allocate a block from the reservation only when
         * the filesystem is mounted with reservation (the default,
         * -o reservation), and
         * it's a regular file, and
         * the desired window size is greater than 0 (one could use the
         * ioctl command EXT3_IOC_SETRSVSZ to set the window size to 0
         * to turn off reservation on that particular file)
         */
        block_i = EXT3_I(inode)->i_block_alloc_info;
        if (block_i && ((windowsz = block_i->rsv_window_node.rsv_goal_size) > 0))
                my_rsv = &block_i->rsv_window_node;

        if (!ext3_has_free_blocks(sbi)) {
                *errp = -ENOSPC;
                goto out;
        }

        /*
         * First, test whether the goal block is free.
         */
        if (goal < le32_to_cpu(es->s_first_data_block) ||
            goal >= le32_to_cpu(es->s_blocks_count))
                goal = le32_to_cpu(es->s_first_data_block);
        group_no = (goal - le32_to_cpu(es->s_first_data_block)) /
                   EXT3_BLOCKS_PER_GROUP(sb);
        gdp = ext3_get_group_desc(sb, group_no, &gdp_bh);
        if (!gdp)
                goto io_error;

        goal_group = group_no;
retry:
        free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
        /*
         * if there are not enough free blocks to make a new reservation,
         * turn off reservation for this allocation
         */
        if (my_rsv && (free_blocks < windowsz)
            && (rsv_is_empty(&my_rsv->rsv_window)))
                my_rsv = NULL;

        if (free_blocks > 0) {
                ret_block = ((goal - le32_to_cpu(es->s_first_data_block)) %
                             EXT3_BLOCKS_PER_GROUP(sb));
                bitmap_bh = read_block_bitmap(sb, group_no);
                if (!bitmap_bh)
                        goto io_error;
                ret_block = ext3_try_to_allocate_with_rsv(sb, handle, group_no,
                                        bitmap_bh, ret_block, my_rsv, &fatal);
                if (fatal)
                        goto out;
                if (ret_block >= 0)
                        goto allocated;
        }

        ngroups = EXT3_SB(sb)->s_groups_count;
        smp_rmb();

        /*
         * Now search the rest of the groups.  We assume that
         * group_no and gdp correctly point to the last group visited.
         */
        for (bgi = 0; bgi < ngroups; bgi++) {
                group_no++;
                if (group_no >= ngroups)
                        group_no = 0;
                gdp = ext3_get_group_desc(sb, group_no, &gdp_bh);
                if (!gdp) {
                        *errp = -EIO;
                        goto out;
                }
                free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
                /*
                 * skip this group if the number of
                 * free blocks is less than half of the reservation
                 * window size.
                 */
                if (free_blocks <= (windowsz/2))
                        continue;

                brelse(bitmap_bh);
                bitmap_bh = read_block_bitmap(sb, group_no);
                if (!bitmap_bh)
                        goto io_error;
                ret_block = ext3_try_to_allocate_with_rsv(sb, handle, group_no,
                                        bitmap_bh, -1, my_rsv, &fatal);
                if (fatal)
                        goto out;
                if (ret_block >= 0)
                        goto allocated;
        }
        /*
         * We may end up with a bogus ENOSPC error here: the filesystem can
         * be "full" of reservations while free blocks are in fact still
         * available on disk.  In this case, forget about the reservations
         * and retry the block allocation as if there were none.
         */
        if (my_rsv) {
                my_rsv = NULL;
                group_no = goal_group;
                goto retry;
        }
        /* No space left on the device */
        *errp = -ENOSPC;
        goto out;
|  | 1314 | allocated: | 
|  | 1315 |  | 
|  | 1316 | ext3_debug("using block group %d(%d)\n", | 
|  | 1317 | group_no, gdp->bg_free_blocks_count); | 

	BUFFER_TRACE(gdp_bh, "get_write_access");
	fatal = ext3_journal_get_write_access(handle, gdp_bh);
	if (fatal)
		goto out;

	target_block = ret_block + group_no * EXT3_BLOCKS_PER_GROUP(sb)
				+ le32_to_cpu(es->s_first_data_block);

	if (target_block == le32_to_cpu(gdp->bg_block_bitmap) ||
	    target_block == le32_to_cpu(gdp->bg_inode_bitmap) ||
	    in_range(target_block, le32_to_cpu(gdp->bg_inode_table),
		     EXT3_SB(sb)->s_itb_per_group))
		ext3_error(sb, "ext3_new_block",
			   "Allocating block in system zone - "
			   "block = %u", target_block);

	performed_allocation = 1;

#ifdef CONFIG_JBD_DEBUG
	{
		struct buffer_head *debug_bh;

		/* Record bitmap buffer state in the newly allocated block */
		debug_bh = sb_find_get_block(sb, target_block);
		if (debug_bh) {
			BUFFER_TRACE(debug_bh, "state when allocated");
			BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap state");
			brelse(debug_bh);
		}
	}
	jbd_lock_bh_state(bitmap_bh);
	spin_lock(sb_bgl_lock(sbi, group_no));
	if (buffer_jbd(bitmap_bh) && bh2jh(bitmap_bh)->b_committed_data) {
		if (ext3_test_bit(ret_block,
				bh2jh(bitmap_bh)->b_committed_data)) {
			printk("%s: block was unexpectedly set in "
				"b_committed_data\n", __FUNCTION__);
		}
	}
	ext3_debug("found bit %d\n", ret_block);
	spin_unlock(sb_bgl_lock(sbi, group_no));
	jbd_unlock_bh_state(bitmap_bh);
#endif

	/* ret_block was blockgroup-relative.  Now it becomes fs-relative */
	ret_block = target_block;

	if (ret_block >= le32_to_cpu(es->s_blocks_count)) {
		ext3_error(sb, "ext3_new_block",
			   "block(%d) >= blocks count(%d) - "
			   "block_group = %d, es == %p ", ret_block,
			le32_to_cpu(es->s_blocks_count), group_no, es);
		goto out;
	}

	/*
	 * It is up to the caller to add the new buffer to a journal
	 * list of some description.  We don't know in advance whether
	 * the caller wants to use it as metadata or data.
	 */
	ext3_debug("allocating block %d. Goal hits %d of %d.\n",
			ret_block, goal_hits, goal_attempts);

	spin_lock(sb_bgl_lock(sbi, group_no));
	gdp->bg_free_blocks_count =
			cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) - 1);
	spin_unlock(sb_bgl_lock(sbi, group_no));
	percpu_counter_mod(&sbi->s_freeblocks_counter, -1);

	BUFFER_TRACE(gdp_bh, "journal_dirty_metadata for group descriptor");
	err = ext3_journal_dirty_metadata(handle, gdp_bh);
	if (!fatal)
		fatal = err;

	sb->s_dirt = 1;
	if (fatal)
		goto out;

	*errp = 0;
	brelse(bitmap_bh);
	return ret_block;

io_error:
	*errp = -EIO;
out:
	if (fatal) {
		*errp = fatal;
		ext3_std_error(sb, fatal);
	}
	/*
	 * Undo the quota charge: if we bailed out before actually
	 * allocating anything, release the block we claimed from the
	 * quota at the start of this function.
	 */
	if (!performed_allocation)
		DQUOT_FREE_BLOCK(inode, 1);
	brelse(bitmap_bh);
	return 0;
}
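
/*
 * Usage sketch (illustrative, not part of the original ext3 code): a
 * typical caller asks for one block near a goal and must treat a
 * return value of 0 as failure, with the reason left in *errp by
 * ext3_new_block() above.  The helper name below is invented for the
 * example and the prototype is assumed from this file:
 */
#if 0
static int example_alloc_near_goal(handle_t *handle, struct inode *inode,
				   unsigned long goal)
{
	int err = 0;
	int block;

	block = ext3_new_block(handle, inode, goal, &err);
	if (!block)
		return err;	/* -ENOSPC, -EIO or a journalling error */
	/* ... map @block into the file here ... */
	return 0;
}
#endif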

unsigned long ext3_count_free_blocks(struct super_block *sb)
{
	unsigned long desc_count;
	struct ext3_group_desc *gdp;
	int i;
	unsigned long ngroups;
#ifdef EXT3FS_DEBUG
	struct ext3_super_block *es;
	unsigned long bitmap_count, x;
	struct buffer_head *bitmap_bh = NULL;

	lock_super(sb);
	es = EXT3_SB(sb)->s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;
	for (i = 0; i < EXT3_SB(sb)->s_groups_count; i++) {
		gdp = ext3_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
		brelse(bitmap_bh);
		bitmap_bh = read_block_bitmap(sb, i);
		if (bitmap_bh == NULL)
			continue;

		x = ext3_count_free(bitmap_bh, sb->s_blocksize);
		printk("group %d: stored = %d, counted = %lu\n",
			i, le16_to_cpu(gdp->bg_free_blocks_count), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
	printk("ext3_count_free_blocks: stored = %u, computed = %lu, %lu\n",
	       le32_to_cpu(es->s_free_blocks_count), desc_count, bitmap_count);
	unlock_super(sb);
	return bitmap_count;
#else
	desc_count = 0;
	ngroups = EXT3_SB(sb)->s_groups_count;
	smp_rmb();
	for (i = 0; i < ngroups; i++) {
		gdp = ext3_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
	}

	return desc_count;
#endif
}
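
/*
 * Usage sketch (illustrative, not part of the original ext3 code): a
 * statfs-style caller could publish the descriptor-based count and
 * subtract the reserved blocks recorded in the superblock.  The helper
 * name is invented for the example:
 */
#if 0
static void example_fill_bfree(struct super_block *sb, struct kstatfs *buf)
{
	struct ext3_super_block *es = EXT3_SB(sb)->s_es;

	buf->f_bfree = ext3_count_free_blocks(sb);
	buf->f_bavail = buf->f_bfree - le32_to_cpu(es->s_r_blocks_count);
	if (buf->f_bfree < le32_to_cpu(es->s_r_blocks_count))
		buf->f_bavail = 0;	/* only root may use the remainder */
}
#endif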

static inline int
block_in_use(unsigned long block, struct super_block *sb, unsigned char *map)
{
	return ext3_test_bit((block -
		le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block)) %
			 EXT3_BLOCKS_PER_GROUP(sb), map);
}
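
/*
 * Example (illustrative, not part of the original ext3 code): with
 * 8192 blocks per group and s_first_data_block == 1, block 8195 tests
 * bit (8195 - 1) % 8192 == 2 of @map.  Note that block_in_use() does
 * not verify that @map is the bitmap of the group the block belongs
 * to; that is the caller's job.
 */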

static inline int test_root(int a, int b)
{
	int num = b;

	while (a > num)
		num *= b;
	return num == a;
}
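
/*
 * Example (illustrative, not part of the original ext3 code):
 * test_root() reports whether a is a positive power of b, so
 * test_root(49, 7) == 1 and test_root(48, 7) == 0.  It is only used
 * below with b = 3, 5 and 7.
 */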

static int ext3_group_sparse(int group)
{
	if (group <= 1)
		return 1;
	if (!(group & 1))
		return 0;
	return (test_root(group, 7) || test_root(group, 5) ||
		test_root(group, 3));
}
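
/*
 * Sketch (illustrative, not part of the original ext3 code): with the
 * sparse_super feature, only groups 0 and 1 and the powers of 3, 5
 * and 7 carry superblock/descriptor backups.  A throwaway loop like
 * the one below would print 0 1 3 5 7 9 25 27 49 81 ...:
 */
#if 0
static void example_list_sparse_groups(int ngroups)
{
	int i;

	for (i = 0; i < ngroups; i++)
		if (ext3_group_sparse(i))
			printk(KERN_DEBUG "backup group %d\n", i);
}
#endif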

/**
 *	ext3_bg_has_super - number of blocks used by the superblock in group
 *	@sb: superblock for filesystem
 *	@group: group number to check
 *
 *	Return the number of blocks used by the superblock (primary or backup)
 *	in this group.  Currently this will be only 0 or 1.
 */
int ext3_bg_has_super(struct super_block *sb, int group)
{
	if (EXT3_HAS_RO_COMPAT_FEATURE(sb, EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER) &&
	    !ext3_group_sparse(group))
		return 0;
	return 1;
}

/**
 *	ext3_bg_num_gdb - number of blocks used by the group table in group
 *	@sb: superblock for filesystem
 *	@group: group number to check
 *
 *	Return the number of blocks used by the group descriptor table
 *	(primary or backup) in this group.  In the future there may be a
 *	different number of descriptor blocks in each group.
 */
unsigned long ext3_bg_num_gdb(struct super_block *sb, int group)
{
	if (EXT3_HAS_RO_COMPAT_FEATURE(sb, EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER) &&
	    !ext3_group_sparse(group))
		return 0;
	return EXT3_SB(sb)->s_gdb_count;
}
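
/*
 * Sketch (illustrative, not part of the original ext3 code): the fixed
 * metadata at the front of a group is the optional superblock copy
 * followed by its descriptor blocks, so a hypothetical overhead helper
 * is simply:
 */
#if 0
static unsigned long example_bg_overhead(struct super_block *sb, int group)
{
	/* 0 or 1 superblock copies plus 0 or s_gdb_count descriptor blocks */
	return ext3_bg_has_super(sb, group) + ext3_bg_num_gdb(sb, group);
}
#endif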

#ifdef CONFIG_EXT3_CHECK
/* Called at mount-time, super-block is locked */
void ext3_check_blocks_bitmap (struct super_block * sb)
{
	struct ext3_super_block *es;
	unsigned long desc_count, bitmap_count, x, j;
	unsigned long desc_blocks;
	struct buffer_head *bitmap_bh = NULL;
	struct ext3_group_desc *gdp;
	int i;

	es = EXT3_SB(sb)->s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;
	for (i = 0; i < EXT3_SB(sb)->s_groups_count; i++) {
		gdp = ext3_get_group_desc (sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
		brelse(bitmap_bh);
		bitmap_bh = read_block_bitmap(sb, i);
		if (bitmap_bh == NULL)
			continue;

		if (ext3_bg_has_super(sb, i) &&
				!ext3_test_bit(0, bitmap_bh->b_data))
			ext3_error(sb, __FUNCTION__,
				   "Superblock in group %d is marked free", i);

		desc_blocks = ext3_bg_num_gdb(sb, i);
		for (j = 0; j < desc_blocks; j++)
			if (!ext3_test_bit(j + 1, bitmap_bh->b_data))
				ext3_error(sb, __FUNCTION__,
					   "Descriptor block #%lu in group "
					   "%d is marked free", j, i);

		if (!block_in_use (le32_to_cpu(gdp->bg_block_bitmap),
					sb, bitmap_bh->b_data))
			ext3_error (sb, "ext3_check_blocks_bitmap",
				    "Block bitmap for group %d is marked free",
				    i);

		if (!block_in_use (le32_to_cpu(gdp->bg_inode_bitmap),
					sb, bitmap_bh->b_data))
			ext3_error (sb, "ext3_check_blocks_bitmap",
				    "Inode bitmap for group %d is marked free",
				    i);

		for (j = 0; j < EXT3_SB(sb)->s_itb_per_group; j++)
			if (!block_in_use (le32_to_cpu(gdp->bg_inode_table) + j,
						sb, bitmap_bh->b_data))
				ext3_error (sb, "ext3_check_blocks_bitmap",
					    "Block #%lu of the inode table in "
					    "group %d is marked free", j, i);

		x = ext3_count_free(bitmap_bh, sb->s_blocksize);
		if (le16_to_cpu(gdp->bg_free_blocks_count) != x)
			ext3_error (sb, "ext3_check_blocks_bitmap",
				    "Wrong free blocks count for group %d, "
				    "stored = %d, counted = %lu", i,
				    le16_to_cpu(gdp->bg_free_blocks_count), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
	if (le32_to_cpu(es->s_free_blocks_count) != bitmap_count)
		ext3_error (sb, "ext3_check_blocks_bitmap",
			    "Wrong free blocks count in super block, "
			    "stored = %lu, counted = %lu",
			    (unsigned long)le32_to_cpu(es->s_free_blocks_count),
			    bitmap_count);
}
#endif