/*
 * Copyright (C) 2011 Fujitsu.  All rights reserved.
 * Written by Miao Xie <miaox@cn.fujitsu.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/slab.h>
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"

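/*
 * Thresholds for flushing delayed items: once the global count of delayed
 * items passes BTRFS_DELAYED_BACKGROUND, btrfs_balance_delayed_items()
 * starts pushing nodes to the async workers; at BTRFS_DELAYED_WRITEBACK
 * it flushes everything and waits for the count to drop back below the
 * background threshold.
 */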
#define BTRFS_DELAYED_WRITEBACK		400
#define BTRFS_DELAYED_BACKGROUND	100

static struct kmem_cache *delayed_node_cache;

int __init btrfs_delayed_inode_init(void)
{
	delayed_node_cache = kmem_cache_create("delayed_node",
					sizeof(struct btrfs_delayed_node),
					0,
					SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
					NULL);
	if (!delayed_node_cache)
		return -ENOMEM;
	return 0;
}

void btrfs_delayed_inode_exit(void)
{
	if (delayed_node_cache)
		kmem_cache_destroy(delayed_node_cache);
}

static inline void btrfs_init_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				struct btrfs_root *root, u64 inode_id)
{
	delayed_node->root = root;
	delayed_node->inode_id = inode_id;
	atomic_set(&delayed_node->refs, 0);
	delayed_node->count = 0;
	delayed_node->in_list = 0;
	delayed_node->inode_dirty = 0;
	delayed_node->ins_root = RB_ROOT;
	delayed_node->del_root = RB_ROOT;
	mutex_init(&delayed_node->mutex);
	delayed_node->index_cnt = 0;
	INIT_LIST_HEAD(&delayed_node->n_list);
	INIT_LIST_HEAD(&delayed_node->p_list);
	delayed_node->bytes_reserved = 0;
}

static inline int btrfs_is_continuous_delayed_item(
					struct btrfs_delayed_item *item1,
					struct btrfs_delayed_item *item2)
{
	if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
	    item1->key.objectid == item2->key.objectid &&
	    item1->key.type == item2->key.type &&
	    item1->key.offset + 1 == item2->key.offset)
		return 1;
	return 0;
}

static inline struct btrfs_delayed_root *btrfs_get_delayed_root(
					struct btrfs_root *root)
{
	return root->fs_info->delayed_root;
}

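/*
 * Look up the delayed node for an inode. The fast path reads the pointer
 * cached in the btrfs inode; otherwise the root's radix tree is searched
 * under inode_lock and, on a hit, the node is cached in the inode. Every
 * successful return takes a reference that the caller must drop with
 * btrfs_release_delayed_node().
 */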
static struct btrfs_delayed_node *btrfs_get_delayed_node(struct inode *inode)
{
	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(inode);
	struct btrfs_delayed_node *node;

	node = ACCESS_ONCE(btrfs_inode->delayed_node);
	if (node) {
		atomic_inc(&node->refs);
		return node;
	}

	spin_lock(&root->inode_lock);
	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
	if (node) {
		if (btrfs_inode->delayed_node) {
			atomic_inc(&node->refs);	/* can be accessed */
			BUG_ON(btrfs_inode->delayed_node != node);
			spin_unlock(&root->inode_lock);
			return node;
		}
		btrfs_inode->delayed_node = node;
		atomic_inc(&node->refs);	/* can be accessed */
		atomic_inc(&node->refs);	/* cached in the inode */
		spin_unlock(&root->inode_lock);
		return node;
	}
	spin_unlock(&root->inode_lock);

	return NULL;
}

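/*
 * Get the delayed node for an inode, allocating and inserting a new one
 * if none exists yet. A racing insertion is detected via -EEXIST from the
 * radix tree, in which case the freshly allocated node is freed and the
 * lookup is retried.
 */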
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
							struct inode *inode)
{
	struct btrfs_delayed_node *node;
	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(inode);
	int ret;

again:
	node = btrfs_get_delayed_node(inode);
	if (node)
		return node;

	node = kmem_cache_alloc(delayed_node_cache, GFP_NOFS);
	if (!node)
		return ERR_PTR(-ENOMEM);
	btrfs_init_delayed_node(node, root, ino);

	atomic_inc(&node->refs);	/* cached in the btrfs inode */
	atomic_inc(&node->refs);	/* can be accessed */

	ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
	if (ret) {
		kmem_cache_free(delayed_node_cache, node);
		return ERR_PTR(ret);
	}

	spin_lock(&root->inode_lock);
	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
	if (ret == -EEXIST) {
		kmem_cache_free(delayed_node_cache, node);
		spin_unlock(&root->inode_lock);
		radix_tree_preload_end();
		goto again;
	}
	btrfs_inode->delayed_node = node;
	spin_unlock(&root->inode_lock);
	radix_tree_preload_end();

	return node;
}

/*
 * Must be called while holding delayed_node->mutex.
 *
 * If mod is non-zero, also add this node to the prepared list.
 */
static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
				     struct btrfs_delayed_node *node,
				     int mod)
{
	spin_lock(&root->lock);
	if (node->in_list) {
		if (!list_empty(&node->p_list))
			list_move_tail(&node->p_list, &root->prepare_list);
		else if (mod)
			list_add_tail(&node->p_list, &root->prepare_list);
	} else {
		list_add_tail(&node->n_list, &root->node_list);
		list_add_tail(&node->p_list, &root->prepare_list);
		atomic_inc(&node->refs);	/* inserted into list */
		root->nodes++;
		node->in_list = 1;
	}
	spin_unlock(&root->lock);
}

/* Must be called while holding delayed_node->mutex. */
static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
				       struct btrfs_delayed_node *node)
{
	spin_lock(&root->lock);
	if (node->in_list) {
		root->nodes--;
		atomic_dec(&node->refs);	/* not in the list */
		list_del_init(&node->n_list);
		if (!list_empty(&node->p_list))
			list_del_init(&node->p_list);
		node->in_list = 0;
	}
	spin_unlock(&root->lock);
}

struct btrfs_delayed_node *btrfs_first_delayed_node(
			struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->node_list))
		goto out;

	p = delayed_root->node_list.next;
	node = list_entry(p, struct btrfs_delayed_node, n_list);
	atomic_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

struct btrfs_delayed_node *btrfs_next_delayed_node(
						struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_root *delayed_root;
	struct list_head *p;
	struct btrfs_delayed_node *next = NULL;

	delayed_root = node->root->fs_info->delayed_root;
	spin_lock(&delayed_root->lock);
	if (!node->in_list) {	/* not in the list */
		if (list_empty(&delayed_root->node_list))
			goto out;
		p = delayed_root->node_list.next;
	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
		goto out;
	else
		p = node->n_list.next;

	next = list_entry(p, struct btrfs_delayed_node, n_list);
	atomic_inc(&next->refs);
out:
	spin_unlock(&delayed_root->lock);

	return next;
}

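/*
 * Drop a reference on a delayed node. Freeing must re-check the refcount
 * under root->inode_lock, because btrfs_get_delayed_node() can still find
 * the node in the radix tree and take a new reference between the
 * atomic_dec_and_test() here and the removal from the tree.
 */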
static void __btrfs_release_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				int mod)
{
	struct btrfs_delayed_root *delayed_root;

	if (!delayed_node)
		return;

	delayed_root = delayed_node->root->fs_info->delayed_root;

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
	else
		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
	mutex_unlock(&delayed_node->mutex);

	if (atomic_dec_and_test(&delayed_node->refs)) {
		struct btrfs_root *root = delayed_node->root;
		spin_lock(&root->inode_lock);
		if (atomic_read(&delayed_node->refs) == 0) {
			radix_tree_delete(&root->delayed_nodes_tree,
					  delayed_node->inode_id);
			kmem_cache_free(delayed_node_cache, delayed_node);
		}
		spin_unlock(&root->inode_lock);
	}
}

static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 0);
}

struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
					struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->prepare_list))
		goto out;

	p = delayed_root->prepare_list.next;
	list_del_init(p);
	node = list_entry(p, struct btrfs_delayed_node, p_list);
	atomic_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static inline void btrfs_release_prepared_delayed_node(
					struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 1);
}

struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
{
	struct btrfs_delayed_item *item;

	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
	if (item) {
		item->data_len = data_len;
		item->ins_or_del = 0;
		item->bytes_reserved = 0;
		item->delayed_node = NULL;
		atomic_set(&item->refs, 1);
	}
	return item;
}

/*
 * __btrfs_lookup_delayed_item - look up a delayed item by key
 * @root:	the rb-tree to search (the node's ins_root or del_root)
 * @key:	the key to look up
 * @prev:	used to store the prev item if the right item isn't found
 * @next:	used to store the next item if the right item isn't found
 *
 * Note: if the right item isn't found, the prev item and the next item
 * are returned through @prev and @next.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
				struct rb_root *root,
				struct btrfs_key *key,
				struct btrfs_delayed_item **prev,
				struct btrfs_delayed_item **next)
{
	struct rb_node *node, *prev_node = NULL;
	struct btrfs_delayed_item *delayed_item = NULL;
	int ret = 0;

	node = root->rb_node;

	while (node) {
		delayed_item = rb_entry(node, struct btrfs_delayed_item,
					rb_node);
		prev_node = node;
		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
		if (ret < 0)
			node = node->rb_right;
		else if (ret > 0)
			node = node->rb_left;
		else
			return delayed_item;
	}

	if (prev) {
		if (!prev_node)
			*prev = NULL;
		else if (ret < 0)
			*prev = delayed_item;
		else if ((node = rb_prev(prev_node)) != NULL) {
			*prev = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*prev = NULL;
	}

	if (next) {
		if (!prev_node)
			*next = NULL;
		else if (ret > 0)
			*next = delayed_item;
		else if ((node = rb_next(prev_node)) != NULL) {
			*next = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*next = NULL;
	}
	return NULL;
}

struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
					   NULL, NULL);
	return item;
}

struct btrfs_delayed_item *__btrfs_lookup_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	item = __btrfs_lookup_delayed_item(&delayed_node->del_root, key,
					   NULL, NULL);
	return item;
}

struct btrfs_delayed_item *__btrfs_search_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	struct btrfs_delayed_item *item, *next;

	item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
					   NULL, &next);
	if (!item)
		item = next;

	return item;
}

struct btrfs_delayed_item *__btrfs_search_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	struct btrfs_delayed_item *item, *next;

	item = __btrfs_lookup_delayed_item(&delayed_node->del_root, key,
					   NULL, &next);
	if (!item)
		item = next;

	return item;
}

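/*
 * Insert a delayed item into the node's insertion or deletion rb-tree,
 * keyed by the btrfs key. Returns -EEXIST if an item with the same key
 * is already queued.
 */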
static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
				    struct btrfs_delayed_item *ins,
				    int action)
{
	struct rb_node **p, *node;
	struct rb_node *parent_node = NULL;
	struct rb_root *root;
	struct btrfs_delayed_item *item;
	int cmp;

	if (action == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_node->ins_root;
	else if (action == BTRFS_DELAYED_DELETION_ITEM)
		root = &delayed_node->del_root;
	else
		BUG();
	p = &root->rb_node;
	node = &ins->rb_node;

	while (*p) {
		parent_node = *p;
		item = rb_entry(parent_node, struct btrfs_delayed_item,
				rb_node);

		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
		if (cmp < 0)
			p = &(*p)->rb_right;
		else if (cmp > 0)
			p = &(*p)->rb_left;
		else
			return -EEXIST;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	ins->delayed_node = delayed_node;
	ins->ins_or_del = action;

	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
	    action == BTRFS_DELAYED_INSERTION_ITEM &&
	    ins->key.offset >= delayed_node->index_cnt)
		delayed_node->index_cnt = ins->key.offset + 1;

	delayed_node->count++;
	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
	return 0;
}

static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
					      struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_INSERTION_ITEM);
}

static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
					     struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_DELETION_ITEM);
}

static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
	struct rb_root *root;
	struct btrfs_delayed_root *delayed_root;

	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;

	BUG_ON(!delayed_root);
	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);

	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_item->delayed_node->ins_root;
	else
		root = &delayed_item->delayed_node->del_root;

	rb_erase(&delayed_item->rb_node, root);
	delayed_item->delayed_node->count--;
	atomic_dec(&delayed_root->items);
	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND &&
	    waitqueue_active(&delayed_root->wait))
		wake_up(&delayed_root->wait);
}

static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
	if (item) {
		__btrfs_remove_delayed_item(item);
		if (atomic_dec_and_test(&item->refs))
			kfree(item);
	}
}

struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->ins_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->del_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

struct btrfs_delayed_item *__btrfs_next_delayed_item(
					struct btrfs_delayed_item *item)
{
	struct rb_node *p;
	struct btrfs_delayed_item *next = NULL;

	p = rb_next(&item->rb_node);
	if (p)
		next = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return next;
}

static inline struct btrfs_root *btrfs_get_fs_root(struct btrfs_root *root,
						   u64 root_id)
{
	struct btrfs_key root_key;

	if (root->objectid == root_id)
		return root;

	root_key.objectid = root_id;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	return btrfs_read_fs_root_no_name(root->fs_info, &root_key);
}

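/*
 * Metadata for a delayed item is reserved by migrating space from the
 * transaction's block reservation into the global reservation, which is
 * the reservation the flush paths below switch to before the delayed
 * items are finally played back into the tree.
 */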
static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
					       struct btrfs_root *root,
					       struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;

	if (!trans->bytes_reserved)
		return 0;

	src_rsv = trans->block_rsv;
	dst_rsv = &root->fs_info->global_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(root, 1);
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
	if (!ret)
		item->bytes_reserved = num_bytes;

	return ret;
}

static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
						struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *rsv;

	if (!item->bytes_reserved)
		return;

	rsv = &root->fs_info->global_block_rsv;
	btrfs_block_rsv_release(root, rsv,
				item->bytes_reserved);
}

static int btrfs_delayed_inode_reserve_metadata(
					struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_delayed_node *node)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;

	if (!trans->bytes_reserved)
		return 0;

	src_rsv = trans->block_rsv;
	dst_rsv = &root->fs_info->global_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(root, 1);
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
	if (!ret)
		node->bytes_reserved = num_bytes;

	return ret;
}

static void btrfs_delayed_inode_release_metadata(struct btrfs_root *root,
						 struct btrfs_delayed_node *node)
{
	struct btrfs_block_rsv *rsv;

	if (!node->bytes_reserved)
		return;

	rsv = &root->fs_info->global_block_rsv;
	btrfs_block_rsv_release(root, rsv,
				node->bytes_reserved);
	node->bytes_reserved = 0;
}

/*
 * This helper inserts a batch of contiguous items into the same leaf,
 * limited by the free space of that leaf.
 */
static int btrfs_batch_insert_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	int free_space;
	int total_data_size = 0, total_size = 0;
	struct extent_buffer *leaf;
	char *data_ptr;
	struct btrfs_key *keys;
	u32 *data_size;
	struct list_head head;
	int slot;
	int nitems;
	int i;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];
	free_space = btrfs_leaf_free_space(root, leaf);
	INIT_LIST_HEAD(&head);

	next = item;
	nitems = 0;

	/*
	 * count how many contiguous items we can insert in one batch
	 */
	while (total_size + next->data_len + sizeof(struct btrfs_item) <=
	       free_space) {
		total_data_size += next->data_len;
		total_size += next->data_len + sizeof(struct btrfs_item);
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;
	}

	if (!nitems) {
		ret = 0;
		goto out;
	}

	/*
	 * we need to allocate some memory, which may cause the task to
	 * sleep, so set all the locked nodes in the path to blocking
	 * locks first.
	 */
	btrfs_set_path_blocking(path);

	keys = kmalloc(sizeof(struct btrfs_key) * nitems, GFP_NOFS);
	if (!keys) {
		ret = -ENOMEM;
		goto out;
	}

	data_size = kmalloc(sizeof(u32) * nitems, GFP_NOFS);
	if (!data_size) {
		ret = -ENOMEM;
		goto error;
	}

	/* get keys of all the delayed items */
	i = 0;
	list_for_each_entry(next, &head, tree_list) {
		keys[i] = next->key;
		data_size[i] = next->data_len;
		i++;
	}

	/* reset all the locked nodes in the path to spinning locks. */
	btrfs_clear_path_blocking(path, NULL);

	/* insert the keys of the items */
	ret = setup_items_for_insert(trans, root, path, keys, data_size,
				     total_data_size, total_size, nitems);
	if (ret)
		goto error;

	/* insert the dir index items */
	slot = path->slots[0];
	list_for_each_entry_safe(curr, next, &head, tree_list) {
		data_ptr = btrfs_item_ptr(leaf, slot, char);
		write_extent_buffer(leaf, &curr->data,
				    (unsigned long)data_ptr,
				    curr->data_len);
		slot++;

		btrfs_delayed_item_release_metadata(root, curr);

		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

error:
	kfree(data_size);
	kfree(keys);
out:
	return ret;
}

/*
 * This helper handles simple insertions that don't need to extend an
 * existing item for new data, such as directory index items and inode
 * items.
 */
static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path,
				     struct btrfs_delayed_item *delayed_item)
{
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	char *ptr;
	int ret;

	ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
				      delayed_item->data_len);
	if (ret < 0 && ret != -EEXIST)
		return ret;

	leaf = path->nodes[0];

	item = btrfs_item_nr(leaf, path->slots[0]);
	ptr = btrfs_item_ptr(leaf, path->slots[0], char);

	write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
			    delayed_item->data_len);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_delayed_item_release_metadata(root, delayed_item);
	return 0;
}

/*
 * we insert one item first, then check for contiguous items and try to
 * insert those into the same leaf.
 */
static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_insertion_item(node);
	if (!curr)
		goto insert_end;

	ret = btrfs_insert_delayed_item(trans, root, path, curr);
	if (ret < 0) {
		btrfs_release_path(path);
		goto insert_end;
	}

	prev = curr;
	curr = __btrfs_next_delayed_item(prev);
	if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
		/* insert the contiguous items into the same leaf */
		path->slots[0]++;
		btrfs_batch_insert_items(trans, root, path, curr);
	}
	btrfs_release_delayed_item(prev);
	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

insert_end:
	mutex_unlock(&node->mutex);
	return ret;
}

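/*
 * Delete a batch of contiguous dir index items that all live in the leaf
 * the path currently points to, releasing each item's metadata
 * reservation as it goes.
 */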
static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct list_head head;
	int nitems, i, last_item;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];

	i = path->slots[0];
	last_item = btrfs_header_nritems(leaf) - 1;
	if (i > last_item)
		return -ENOENT;	/* FIXME: Is errno suitable? */

	next = item;
	INIT_LIST_HEAD(&head);
	btrfs_item_key_to_cpu(leaf, &key, i);
	nitems = 0;
	/*
	 * count the number of dir index items that we can delete in one
	 * batch
	 */
	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;

		i++;
		if (i > last_item)
			break;
		btrfs_item_key_to_cpu(leaf, &key, i);
	}

	if (!nitems)
		return 0;

	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
	if (ret)
		goto out;

	list_for_each_entry_safe(curr, next, &head, tree_list) {
		btrfs_delayed_item_release_metadata(root, curr);
		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

out:
	return ret;
}

static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_deletion_item(node);
	if (!curr)
		goto delete_fail;

	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
	if (ret < 0)
		goto delete_fail;
	else if (ret > 0) {
		/*
		 * can't find the item which the node points to, so this
		 * node is invalid, just drop it.
		 */
		prev = curr;
		curr = __btrfs_next_delayed_item(prev);
		btrfs_release_delayed_item(prev);
		ret = 0;
		btrfs_release_path(path);
		if (curr)
			goto do_again;
		else
			goto delete_fail;
	}

	btrfs_batch_delete_items(trans, root, path, curr);
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

delete_fail:
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	return ret;
}

static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	if (delayed_node && delayed_node->inode_dirty) {
		BUG_ON(!delayed_node->root);
		delayed_node->inode_dirty = 0;
		delayed_node->count--;

		delayed_root = delayed_node->root->fs_info->delayed_root;
		atomic_dec(&delayed_root->items);
		if (atomic_read(&delayed_root->items) <
		    BTRFS_DELAYED_BACKGROUND &&
		    waitqueue_active(&delayed_root->wait))
			wake_up(&delayed_root->wait);
	}
}

static int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_key key;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	int ret;

	mutex_lock(&node->mutex);
	if (!node->inode_dirty) {
		mutex_unlock(&node->mutex);
		return 0;
	}

	key.objectid = node->inode_id;
	btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
	key.offset = 0;
	ret = btrfs_lookup_inode(trans, root, path, &key, 1);
	if (ret > 0) {
		btrfs_release_path(path);
		mutex_unlock(&node->mutex);
		return -ENOENT;
	} else if (ret < 0) {
		mutex_unlock(&node->mutex);
		return ret;
	}

	btrfs_unlock_up_safe(path, 1);
	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
			    sizeof(struct btrfs_inode_item));
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	btrfs_delayed_inode_release_metadata(root, node);
	btrfs_release_delayed_inode(node);
	mutex_unlock(&node->mutex);

	return 0;
}

/* Called when committing the transaction. */
int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &root->fs_info->global_block_rsv;

	delayed_root = btrfs_get_delayed_root(root);

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node) {
		root = curr_node->root;
		ret = btrfs_insert_delayed_items(trans, path, root,
						 curr_node);
		if (!ret)
			ret = btrfs_delete_delayed_items(trans, path, root,
							 curr_node);
		if (!ret)
			ret = btrfs_update_delayed_inode(trans, root, path,
							 curr_node);
		if (ret) {
			btrfs_release_delayed_node(curr_node);
			break;
		}

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}

	btrfs_free_path(path);
	trans->block_rsv = block_rsv;
	return ret;
}

static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
					      struct btrfs_delayed_node *node)
{
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &node->root->fs_info->global_block_rsv;

	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
	if (!ret)
		ret = btrfs_delete_delayed_items(trans, path, node->root, node);
	if (!ret)
		ret = btrfs_update_delayed_inode(trans, node->root, path, node);
	btrfs_free_path(path);

	trans->block_rsv = block_rsv;
	return ret;
}

int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				     struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->count) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	ret = __btrfs_commit_inode_delayed_items(trans, delayed_node);
	btrfs_release_delayed_node(delayed_node);
	return ret;
}

void btrfs_remove_delayed_node(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = ACCESS_ONCE(BTRFS_I(inode)->delayed_node);
	if (!delayed_node)
		return;

	BTRFS_I(inode)->delayed_node = NULL;
	btrfs_release_delayed_node(delayed_node);
}

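/* Work item handed to the delayed_workers threads to flush one node. */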
struct btrfs_async_delayed_node {
	struct btrfs_root *root;
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_work work;
};

static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
{
	struct btrfs_async_delayed_node *async_node;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_delayed_node *delayed_node = NULL;
	struct btrfs_root *root;
	struct btrfs_block_rsv *block_rsv;
	unsigned long nr = 0;
	int need_requeue = 0;
	int ret;

	async_node = container_of(work, struct btrfs_async_delayed_node, work);

	path = btrfs_alloc_path();
	if (!path)
		goto out;
	path->leave_spinning = 1;

	delayed_node = async_node->delayed_node;
	root = delayed_node->root;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		goto free_path;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &root->fs_info->global_block_rsv;

	ret = btrfs_insert_delayed_items(trans, path, root, delayed_node);
	if (!ret)
		ret = btrfs_delete_delayed_items(trans, path, root,
						 delayed_node);

	if (!ret)
		btrfs_update_delayed_inode(trans, root, path, delayed_node);

	/*
	 * Maybe new delayed items have been inserted, so we need to requeue
	 * the work. Besides that, we must dequeue the empty delayed nodes
	 * to avoid the race between delayed items balance and the worker.
	 * The race looks like this:
	 * 	Task1				Worker thread
	 * 					count == 0, needn't requeue
	 * 					  also needn't insert the
	 * 					  delayed node into prepare
	 * 					  list again.
	 * 	add lots of delayed items
	 * 	queue the delayed node
	 * 	  already in the list,
	 * 	  and not in the prepare
	 * 	  list, it means the delayed
	 * 	  node is being dealt with
	 * 	  by the worker.
	 * 	do delayed items balance
	 * 	  the delayed node is being
	 * 	  dealt with by the worker
	 * 	  now, just wait.
	 * 	  				the worker goes idle.
	 * Task1 will sleep until the transaction is committed.
	 */
	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		need_requeue = 1;
	else
		btrfs_dequeue_delayed_node(root->fs_info->delayed_root,
					   delayed_node);
	mutex_unlock(&delayed_node->mutex);

	nr = trans->blocks_used;

	trans->block_rsv = block_rsv;
	btrfs_end_transaction_dmeta(trans, root);
	__btrfs_btree_balance_dirty(root, nr);
free_path:
	btrfs_free_path(path);
out:
	if (need_requeue)
		btrfs_requeue_work(&async_node->work);
	else {
		btrfs_release_prepared_delayed_node(delayed_node);
		kfree(async_node);
	}
}

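/*
 * Queue prepared delayed nodes to the worker threads. If @all is set,
 * every prepared node is queued; otherwise at most four nodes are handed
 * off per call.
 */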
static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
				     struct btrfs_root *root, int all)
{
	struct btrfs_async_delayed_node *async_node;
	struct btrfs_delayed_node *curr;
	int count = 0;

again:
	curr = btrfs_first_prepared_delayed_node(delayed_root);
	if (!curr)
		return 0;

	async_node = kmalloc(sizeof(*async_node), GFP_NOFS);
	if (!async_node) {
		btrfs_release_prepared_delayed_node(curr);
		return -ENOMEM;
	}

	async_node->root = root;
	async_node->delayed_node = curr;

	async_node->work.func = btrfs_async_run_delayed_node_done;
	async_node->work.flags = 0;

	btrfs_queue_worker(&root->fs_info->delayed_workers, &async_node->work);
	count++;

	if (all || count < 4)
		goto again;

	return 0;
}

void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
{
	struct btrfs_delayed_root *delayed_root;

	delayed_root = btrfs_get_delayed_root(root);
	WARN_ON(btrfs_first_delayed_node(delayed_root));
}

void btrfs_balance_delayed_items(struct btrfs_root *root)
{
	struct btrfs_delayed_root *delayed_root;

	delayed_root = btrfs_get_delayed_root(root);

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return;

	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
		int ret;
		ret = btrfs_wq_run_delayed_node(delayed_root, root, 1);
		if (ret)
			return;

		wait_event_interruptible_timeout(
				delayed_root->wait,
				(atomic_read(&delayed_root->items) <
				 BTRFS_DELAYED_BACKGROUND),
				HZ);
		return;
	}

	btrfs_wq_run_delayed_node(delayed_root, root, 0);
}

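/*
 * Queue a dir index insertion as a delayed item instead of modifying the
 * b-tree immediately; the item is played back at transaction commit or
 * by the async workers.
 */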
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, const char *name,
				   int name_len, struct inode *dir,
				   struct btrfs_disk_key *disk_key, u8 type,
				   u64 index)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *delayed_item;
	struct btrfs_dir_item *dir_item;
	int ret;

	delayed_node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
	if (!delayed_item) {
		ret = -ENOMEM;
		goto release_node;
	}

	ret = btrfs_delayed_item_reserve_metadata(trans, root, delayed_item);
	/*
	 * we reserved enough space when we started the transaction, so
	 * a metadata reservation failure here is impossible
	 */
	BUG_ON(ret);

	delayed_item->key.objectid = btrfs_ino(dir);
	btrfs_set_key_type(&delayed_item->key, BTRFS_DIR_INDEX_KEY);
	delayed_item->key.offset = index;

	dir_item = (struct btrfs_dir_item *)delayed_item->data;
	dir_item->location = *disk_key;
	dir_item->transid = cpu_to_le64(trans->transid);
	dir_item->data_len = 0;
	dir_item->name_len = cpu_to_le16(name_len);
	dir_item->type = type;
	memcpy((char *)(dir_item + 1), name, name_len);

	mutex_lock(&delayed_node->mutex);
	ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
	if (unlikely(ret)) {
		printk(KERN_ERR "error adding delayed dir index item"
			"(name: %s) into the insertion tree of the delayed "
			"node (root id: %llu, inode id: %llu, errno: %d)\n",
			name,
			(unsigned long long)delayed_node->root->objectid,
			(unsigned long long)delayed_node->inode_id,
			ret);
		BUG();
	}
	mutex_unlock(&delayed_node->mutex);

release_node:
	btrfs_release_delayed_node(delayed_node);
	return ret;
}

static int btrfs_delete_delayed_insertion_item(struct btrfs_root *root,
					       struct btrfs_delayed_node *node,
					       struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	mutex_lock(&node->mutex);
	item = __btrfs_lookup_delayed_insertion_item(node, key);
	if (!item) {
		mutex_unlock(&node->mutex);
		return 1;
	}

	btrfs_delayed_item_release_metadata(root, item);
	btrfs_release_delayed_item(item);
	mutex_unlock(&node->mutex);
	return 0;
}

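/*
 * Delete a dir index item. If a matching insertion is still queued and
 * has not reached the tree, the two simply cancel each other out;
 * otherwise a deletion item is queued.
 */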
|  | 1354 | int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans, | 
|  | 1355 | struct btrfs_root *root, struct inode *dir, | 
|  | 1356 | u64 index) | 
|  | 1357 | { | 
|  | 1358 | struct btrfs_delayed_node *node; | 
|  | 1359 | struct btrfs_delayed_item *item; | 
|  | 1360 | struct btrfs_key item_key; | 
|  | 1361 | int ret; | 
|  | 1362 |  | 
|  | 1363 | node = btrfs_get_or_create_delayed_node(dir); | 
|  | 1364 | if (IS_ERR(node)) | 
|  | 1365 | return PTR_ERR(node); | 
|  | 1366 |  | 
| Chris Mason | 0d0ca30 | 2011-05-22 07:11:22 -0400 | [diff] [blame] | 1367 | item_key.objectid = btrfs_ino(dir); | 
| Miao Xie | 16cdcec | 2011-04-22 18:12:22 +0800 | [diff] [blame] | 1368 | btrfs_set_key_type(&item_key, BTRFS_DIR_INDEX_KEY); | 
|  | 1369 | item_key.offset = index; | 
|  | 1370 |  | 
|  | 1371 | ret = btrfs_delete_delayed_insertion_item(root, node, &item_key); | 
|  | 1372 | if (!ret) | 
|  | 1373 | goto end; | 
|  | 1374 |  | 
|  | 1375 | item = btrfs_alloc_delayed_item(0); | 
|  | 1376 | if (!item) { | 
|  | 1377 | ret = -ENOMEM; | 
|  | 1378 | goto end; | 
|  | 1379 | } | 
|  | 1380 |  | 
|  | 1381 | item->key = item_key; | 
|  | 1382 |  | 
|  | 1383 | ret = btrfs_delayed_item_reserve_metadata(trans, root, item); | 
|  | 1384 | /* | 
|  | 1385 | * we have reserved enough space when we start a new transaction, | 
|  | 1386 | * so reserving metadata failure is impossible. | 
|  | 1387 | */ | 
|  | 1388 | BUG_ON(ret); | 
|  | 1389 |  | 
|  | 1390 | mutex_lock(&node->mutex); | 
|  | 1391 | ret = __btrfs_add_delayed_deletion_item(node, item); | 
|  | 1392 | if (unlikely(ret)) { | 
|  | 1393 | printk(KERN_ERR "err add delayed dir index item(index: %llu) " | 
|  | 1394 | "into the deletion tree of the delayed node" | 
|  | 1395 | "(root id: %llu, inode id: %llu, errno: %d)\n", | 
|  | 1396 | (unsigned long long)index, | 
|  | 1397 | (unsigned long long)node->root->objectid, | 
|  | 1398 | (unsigned long long)node->inode_id, | 
|  | 1399 | ret); | 
|  | 1400 | BUG(); | 
|  | 1401 | } | 
|  | 1402 | mutex_unlock(&node->mutex); | 
|  | 1403 | end: | 
|  | 1404 | btrfs_release_delayed_node(node); | 
|  | 1405 | return ret; | 
|  | 1406 | } | 
|  | 1407 |  | 
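|  |  | /* | 
|  |  | * Copy the cached directory index counter from the delayed node into | 
|  |  | * the btrfs inode, so that the next index number can be allocated | 
|  |  | * without searching the b-tree. Returns -ENOENT if there is no | 
|  |  | * delayed node and -EINVAL if the counter has never been set. | 
|  |  | */ | 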
|  | 1408 | int btrfs_inode_delayed_dir_index_count(struct inode *inode) | 
|  | 1409 | { | 
| Miao Xie | 2f7e33d | 2011-06-23 07:27:13 +0000 | [diff] [blame] | 1410 | struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode); | 
| Miao Xie | 16cdcec | 2011-04-22 18:12:22 +0800 | [diff] [blame] | 1411 |  | 
|  | 1412 | if (!delayed_node) | 
|  | 1413 | return -ENOENT; | 
|  | 1414 |  | 
|  | 1415 | /* | 
|  | 1416 | * Since we hold the i_mutex of this directory, no new directory index | 
|  | 1417 | * can be added to the delayed node and index_cnt cannot change under | 
|  | 1418 | * us, so we need not lock the delayed node. | 
|  | 1419 | */ | 
| Miao Xie | 2f7e33d | 2011-06-23 07:27:13 +0000 | [diff] [blame] | 1420 | if (!delayed_node->index_cnt) { | 
|  | 1421 | btrfs_release_delayed_node(delayed_node); | 
| Miao Xie | 16cdcec | 2011-04-22 18:12:22 +0800 | [diff] [blame] | 1422 | return -EINVAL; | 
| Miao Xie | 2f7e33d | 2011-06-23 07:27:13 +0000 | [diff] [blame] | 1423 | } | 
| Miao Xie | 16cdcec | 2011-04-22 18:12:22 +0800 | [diff] [blame] | 1424 |  | 
|  | 1425 | BTRFS_I(inode)->index_cnt = delayed_node->index_cnt; | 
| Miao Xie | 2f7e33d | 2011-06-23 07:27:13 +0000 | [diff] [blame] | 1426 | btrfs_release_delayed_node(delayed_node); | 
|  | 1427 | return 0; | 
| Miao Xie | 16cdcec | 2011-04-22 18:12:22 +0800 | [diff] [blame] | 1428 | } | 
|  | 1429 |  | 
|  | 1430 | void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list, | 
|  | 1431 | struct list_head *del_list) | 
|  | 1432 | { | 
|  | 1433 | struct btrfs_delayed_node *delayed_node; | 
|  | 1434 | struct btrfs_delayed_item *item; | 
|  | 1435 |  | 
|  | 1436 | delayed_node = btrfs_get_delayed_node(inode); | 
|  | 1437 | if (!delayed_node) | 
|  | 1438 | return; | 
|  | 1439 |  | 
|  | 1440 | mutex_lock(&delayed_node->mutex); | 
|  | 1441 | item = __btrfs_first_delayed_insertion_item(delayed_node); | 
|  | 1442 | while (item) { | 
|  | 1443 | atomic_inc(&item->refs); | 
|  | 1444 | list_add_tail(&item->readdir_list, ins_list); | 
|  | 1445 | item = __btrfs_next_delayed_item(item); | 
|  | 1446 | } | 
|  | 1447 |  | 
|  | 1448 | item = __btrfs_first_delayed_deletion_item(delayed_node); | 
|  | 1449 | while (item) { | 
|  | 1450 | atomic_inc(&item->refs); | 
|  | 1451 | list_add_tail(&item->readdir_list, del_list); | 
|  | 1452 | item = __btrfs_next_delayed_item(item); | 
|  | 1453 | } | 
|  | 1454 | mutex_unlock(&delayed_node->mutex); | 
|  | 1455 | /* | 
|  | 1456 | * This delayed node is still cached in the btrfs inode, so refs | 
|  | 1457 | * must be > 1 now, and we need not check whether it is about to | 
|  | 1458 | * be freed. | 
|  | 1459 | * | 
|  | 1460 | * Besides that, this function is only used to read a directory, and | 
|  | 1461 | * no delayed items are inserted or deleted during that time, so we | 
|  | 1462 | * need not requeue or dequeue this delayed node either. | 
|  | 1463 | */ | 
|  | 1464 | atomic_dec(&delayed_node->refs); | 
|  | 1465 | } | 
|  | 1466 |  | 
|  | 1467 | void btrfs_put_delayed_items(struct list_head *ins_list, | 
|  | 1468 | struct list_head *del_list) | 
|  | 1469 | { | 
|  | 1470 | struct btrfs_delayed_item *curr, *next; | 
|  | 1471 |  | 
|  | 1472 | list_for_each_entry_safe(curr, next, ins_list, readdir_list) { | 
|  | 1473 | list_del(&curr->readdir_list); | 
|  | 1474 | if (atomic_dec_and_test(&curr->refs)) | 
|  | 1475 | kfree(curr); | 
|  | 1476 | } | 
|  | 1477 |  | 
|  | 1478 | list_for_each_entry_safe(curr, next, del_list, readdir_list) { | 
|  | 1479 | list_del(&curr->readdir_list); | 
|  | 1480 | if (atomic_dec_and_test(&curr->refs)) | 
|  | 1481 | kfree(curr); | 
|  | 1482 | } | 
|  | 1483 | } | 
|  | 1484 |  | 
|  | 1485 | int btrfs_should_delete_dir_index(struct list_head *del_list, | 
|  | 1486 | u64 index) | 
|  | 1487 | { | 
|  | 1488 | struct btrfs_delayed_item *curr, *next; | 
|  | 1489 | int ret; | 
|  | 1490 |  | 
|  | 1491 | if (list_empty(del_list)) | 
|  | 1492 | return 0; | 
|  | 1493 |  | 
|  | 1494 | list_for_each_entry_safe(curr, next, del_list, readdir_list) { | 
|  | 1495 | if (curr->key.offset > index) | 
|  | 1496 | break; | 
|  | 1497 |  | 
|  | 1498 | list_del(&curr->readdir_list); | 
|  | 1499 | ret = (curr->key.offset == index); | 
|  | 1500 |  | 
|  | 1501 | if (atomic_dec_and_test(&curr->refs)) | 
|  | 1502 | kfree(curr); | 
|  | 1503 |  | 
|  | 1504 | if (ret) | 
|  | 1505 | return 1; | 
|  | 1508 | } | 
|  | 1509 | return 0; | 
|  | 1510 | } | 
|  | 1511 |  | 
|  | 1512 | /* | 
|  | 1513 | * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed | 
|  | 1514 | * tree, i.e. directory index items that are not flushed to the b-tree yet | 
|  | 1515 | */ | 
|  | 1516 | int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent, | 
|  | 1517 | filldir_t filldir, | 
|  | 1518 | struct list_head *ins_list) | 
|  | 1519 | { | 
|  | 1520 | struct btrfs_dir_item *di; | 
|  | 1521 | struct btrfs_delayed_item *curr, *next; | 
|  | 1522 | struct btrfs_key location; | 
|  | 1523 | char *name; | 
|  | 1524 | int name_len; | 
|  | 1525 | int over = 0; | 
|  | 1526 | unsigned char d_type; | 
|  | 1527 |  | 
|  | 1528 | if (list_empty(ins_list)) | 
|  | 1529 | return 0; | 
|  | 1530 |  | 
|  | 1531 | /* | 
|  | 1532 | * The data of a delayed item never changes after insertion, so we | 
|  | 1533 | * need not lock the items. And since we hold the i_mutex of the | 
|  | 1534 | * directory, nobody can delete any directory index now. | 
|  | 1535 | */ | 
|  | 1536 | list_for_each_entry_safe(curr, next, ins_list, readdir_list) { | 
|  | 1537 | list_del(&curr->readdir_list); | 
|  | 1538 |  | 
|  | 1539 | if (curr->key.offset < filp->f_pos) { | 
|  | 1540 | if (atomic_dec_and_test(&curr->refs)) | 
|  | 1541 | kfree(curr); | 
|  | 1542 | continue; | 
|  | 1543 | } | 
|  | 1544 |  | 
|  | 1545 | filp->f_pos = curr->key.offset; | 
|  | 1546 |  | 
|  | 1547 | di = (struct btrfs_dir_item *)curr->data; | 
|  | 1548 | name = (char *)(di + 1); | 
|  | 1549 | name_len = le16_to_cpu(di->name_len); | 
|  | 1550 |  | 
|  | 1551 | d_type = btrfs_filetype_table[di->type]; | 
|  | 1552 | btrfs_disk_key_to_cpu(&location, &di->location); | 
|  | 1553 |  | 
|  | 1554 | over = filldir(dirent, name, name_len, curr->key.offset, | 
|  | 1555 | location.objectid, d_type); | 
|  | 1556 |  | 
|  | 1557 | if (atomic_dec_and_test(&curr->refs)) | 
|  | 1558 | kfree(curr); | 
|  | 1559 |  | 
|  | 1560 | if (over) | 
|  | 1561 | return 1; | 
|  | 1562 | } | 
|  | 1563 | return 0; | 
|  | 1564 | } | 
|  | 1565 |  | 
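|  |  | /* | 
|  |  | * A rough sketch of how a readdir implementation is expected to pair | 
|  |  | * the four helpers above (hypothetical caller, for illustration only): | 
|  |  | * | 
|  |  | * LIST_HEAD(ins_list); | 
|  |  | * LIST_HEAD(del_list); | 
|  |  | * | 
|  |  | * btrfs_get_delayed_items(inode, &ins_list, &del_list); | 
|  |  | * ... walk the on-disk dir items, skipping every index for which | 
|  |  | * btrfs_should_delete_dir_index(&del_list, index) returns 1 ... | 
|  |  | * btrfs_readdir_delayed_dir_index(filp, dirent, filldir, &ins_list); | 
|  |  | * btrfs_put_delayed_items(&ins_list, &del_list); | 
|  |  | */ | 
|  |  |  | 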
|  | 1566 | BTRFS_SETGET_STACK_FUNCS(stack_inode_generation, struct btrfs_inode_item, | 
|  | 1567 | generation, 64); | 
|  | 1568 | BTRFS_SETGET_STACK_FUNCS(stack_inode_sequence, struct btrfs_inode_item, | 
|  | 1569 | sequence, 64); | 
|  | 1570 | BTRFS_SETGET_STACK_FUNCS(stack_inode_transid, struct btrfs_inode_item, | 
|  | 1571 | transid, 64); | 
|  | 1572 | BTRFS_SETGET_STACK_FUNCS(stack_inode_size, struct btrfs_inode_item, size, 64); | 
|  | 1573 | BTRFS_SETGET_STACK_FUNCS(stack_inode_nbytes, struct btrfs_inode_item, | 
|  | 1574 | nbytes, 64); | 
|  | 1575 | BTRFS_SETGET_STACK_FUNCS(stack_inode_block_group, struct btrfs_inode_item, | 
|  | 1576 | block_group, 64); | 
|  | 1577 | BTRFS_SETGET_STACK_FUNCS(stack_inode_nlink, struct btrfs_inode_item, nlink, 32); | 
|  | 1578 | BTRFS_SETGET_STACK_FUNCS(stack_inode_uid, struct btrfs_inode_item, uid, 32); | 
|  | 1579 | BTRFS_SETGET_STACK_FUNCS(stack_inode_gid, struct btrfs_inode_item, gid, 32); | 
|  | 1580 | BTRFS_SETGET_STACK_FUNCS(stack_inode_mode, struct btrfs_inode_item, mode, 32); | 
|  | 1581 | BTRFS_SETGET_STACK_FUNCS(stack_inode_rdev, struct btrfs_inode_item, rdev, 64); | 
|  | 1582 | BTRFS_SETGET_STACK_FUNCS(stack_inode_flags, struct btrfs_inode_item, flags, 64); | 
|  | 1583 |  | 
|  | 1584 | BTRFS_SETGET_STACK_FUNCS(stack_timespec_sec, struct btrfs_timespec, sec, 64); | 
|  | 1585 | BTRFS_SETGET_STACK_FUNCS(stack_timespec_nsec, struct btrfs_timespec, nsec, 32); | 
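|  |  |  | 
|  |  | /* | 
|  |  | * For reference, each BTRFS_SETGET_STACK_FUNCS() invocation above is | 
|  |  | * expected to expand to a get/set pair that converts between CPU and | 
|  |  | * little-endian on-disk byte order, roughly (sketch based on the | 
|  |  | * generic macro in ctree.h): | 
|  |  | * | 
|  |  | * static inline u64 btrfs_stack_inode_size(struct btrfs_inode_item *s) | 
|  |  | * { | 
|  |  | * return le64_to_cpu(s->size); | 
|  |  | * } | 
|  |  | * static inline void btrfs_set_stack_inode_size( | 
|  |  | * struct btrfs_inode_item *s, u64 val) | 
|  |  | * { | 
|  |  | * s->size = cpu_to_le64(val); | 
|  |  | * } | 
|  |  | */ | 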
|  | 1586 |  | 
|  | 1587 | static void fill_stack_inode_item(struct btrfs_trans_handle *trans, | 
|  | 1588 | struct btrfs_inode_item *inode_item, | 
|  | 1589 | struct inode *inode) | 
|  | 1590 | { | 
|  | 1591 | btrfs_set_stack_inode_uid(inode_item, inode->i_uid); | 
|  | 1592 | btrfs_set_stack_inode_gid(inode_item, inode->i_gid); | 
|  | 1593 | btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size); | 
|  | 1594 | btrfs_set_stack_inode_mode(inode_item, inode->i_mode); | 
|  | 1595 | btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink); | 
|  | 1596 | btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode)); | 
|  | 1597 | btrfs_set_stack_inode_generation(inode_item, | 
|  | 1598 | BTRFS_I(inode)->generation); | 
|  | 1599 | btrfs_set_stack_inode_sequence(inode_item, BTRFS_I(inode)->sequence); | 
|  | 1600 | btrfs_set_stack_inode_transid(inode_item, trans->transid); | 
|  | 1601 | btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev); | 
|  | 1602 | btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags); | 
| Chris Mason | ff5714c | 2011-05-28 07:00:39 -0400 | [diff] [blame] | 1603 | btrfs_set_stack_inode_block_group(inode_item, 0); | 
| Miao Xie | 16cdcec | 2011-04-22 18:12:22 +0800 | [diff] [blame] | 1604 |  | 
|  | 1605 | btrfs_set_stack_timespec_sec(btrfs_inode_atime(inode_item), | 
|  | 1606 | inode->i_atime.tv_sec); | 
|  | 1607 | btrfs_set_stack_timespec_nsec(btrfs_inode_atime(inode_item), | 
|  | 1608 | inode->i_atime.tv_nsec); | 
|  | 1609 |  | 
|  | 1610 | btrfs_set_stack_timespec_sec(btrfs_inode_mtime(inode_item), | 
|  | 1611 | inode->i_mtime.tv_sec); | 
|  | 1612 | btrfs_set_stack_timespec_nsec(btrfs_inode_mtime(inode_item), | 
|  | 1613 | inode->i_mtime.tv_nsec); | 
|  | 1614 |  | 
|  | 1615 | btrfs_set_stack_timespec_sec(btrfs_inode_ctime(inode_item), | 
|  | 1616 | inode->i_ctime.tv_sec); | 
|  | 1617 | btrfs_set_stack_timespec_nsec(btrfs_inode_ctime(inode_item), | 
|  | 1618 | inode->i_ctime.tv_nsec); | 
|  | 1619 | } | 
|  | 1620 |  | 
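|  |  | /* | 
|  |  | * btrfs_fill_inode - the inverse of fill_stack_inode_item(): if the | 
|  |  | * delayed node holds a dirty copy of the inode item, initialize the | 
|  |  | * VFS inode from that cached copy instead of the on-disk item. | 
|  |  | */ | 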
| Miao Xie | 2f7e33d | 2011-06-23 07:27:13 +0000 | [diff] [blame] | 1621 | int btrfs_fill_inode(struct inode *inode, u32 *rdev) | 
|  | 1622 | { | 
|  | 1623 | struct btrfs_delayed_node *delayed_node; | 
|  | 1624 | struct btrfs_inode_item *inode_item; | 
|  | 1625 | struct btrfs_timespec *tspec; | 
|  | 1626 |  | 
|  | 1627 | delayed_node = btrfs_get_delayed_node(inode); | 
|  | 1628 | if (!delayed_node) | 
|  | 1629 | return -ENOENT; | 
|  | 1630 |  | 
|  | 1631 | mutex_lock(&delayed_node->mutex); | 
|  | 1632 | if (!delayed_node->inode_dirty) { | 
|  | 1633 | mutex_unlock(&delayed_node->mutex); | 
|  | 1634 | btrfs_release_delayed_node(delayed_node); | 
|  | 1635 | return -ENOENT; | 
|  | 1636 | } | 
|  | 1637 |  | 
|  | 1638 | inode_item = &delayed_node->inode_item; | 
|  | 1639 |  | 
|  | 1640 | inode->i_uid = btrfs_stack_inode_uid(inode_item); | 
|  | 1641 | inode->i_gid = btrfs_stack_inode_gid(inode_item); | 
|  | 1642 | btrfs_i_size_write(inode, btrfs_stack_inode_size(inode_item)); | 
|  | 1643 | inode->i_mode = btrfs_stack_inode_mode(inode_item); | 
|  | 1644 | inode->i_nlink = btrfs_stack_inode_nlink(inode_item); | 
|  | 1645 | inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item)); | 
|  | 1646 | BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item); | 
|  | 1647 | BTRFS_I(inode)->sequence = btrfs_stack_inode_sequence(inode_item); | 
|  | 1648 | inode->i_rdev = 0; | 
|  | 1649 | *rdev = btrfs_stack_inode_rdev(inode_item); | 
|  | 1650 | BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item); | 
|  | 1651 |  | 
|  | 1652 | tspec = btrfs_inode_atime(inode_item); | 
|  | 1653 | inode->i_atime.tv_sec = btrfs_stack_timespec_sec(tspec); | 
|  | 1654 | inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(tspec); | 
|  | 1655 |  | 
|  | 1656 | tspec = btrfs_inode_mtime(inode_item); | 
|  | 1657 | inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(tspec); | 
|  | 1658 | inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(tspec); | 
|  | 1659 |  | 
|  | 1660 | tspec = btrfs_inode_ctime(inode_item); | 
|  | 1661 | inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(tspec); | 
|  | 1662 | inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(tspec); | 
|  | 1663 |  | 
|  | 1664 | inode->i_generation = BTRFS_I(inode)->generation; | 
|  | 1665 | BTRFS_I(inode)->index_cnt = (u64)-1; | 
|  | 1666 |  | 
|  | 1667 | mutex_unlock(&delayed_node->mutex); | 
|  | 1668 | btrfs_release_delayed_node(delayed_node); | 
|  | 1669 | return 0; | 
|  | 1670 | } | 
|  | 1671 |  | 
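|  |  | /* | 
|  |  | * Record the current state of the inode in its delayed node instead | 
|  |  | * of updating the inode item in the b-tree right away; the cached | 
|  |  | * copy is written back when the delayed node is flushed. | 
|  |  | */ | 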
| Miao Xie | 16cdcec | 2011-04-22 18:12:22 +0800 | [diff] [blame] | 1672 | int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans, | 
|  | 1673 | struct btrfs_root *root, struct inode *inode) | 
|  | 1674 | { | 
|  | 1675 | struct btrfs_delayed_node *delayed_node; | 
| David Sterba | aa0467d | 2011-06-03 16:29:08 +0200 | [diff] [blame] | 1676 | int ret = 0; | 
| Miao Xie | 16cdcec | 2011-04-22 18:12:22 +0800 | [diff] [blame] | 1677 |  | 
|  | 1678 | delayed_node = btrfs_get_or_create_delayed_node(inode); | 
|  | 1679 | if (IS_ERR(delayed_node)) | 
|  | 1680 | return PTR_ERR(delayed_node); | 
|  | 1681 |  | 
|  | 1682 | mutex_lock(&delayed_node->mutex); | 
|  | 1683 | if (delayed_node->inode_dirty) { | 
|  | 1684 | fill_stack_inode_item(trans, &delayed_node->inode_item, inode); | 
|  | 1685 | goto release_node; | 
|  | 1686 | } | 
|  | 1687 |  | 
|  | 1688 | ret = btrfs_delayed_inode_reserve_metadata(trans, root, delayed_node); | 
|  | 1689 | /* | 
|  | 1690 | * We reserved enough space when we started the transaction, so | 
|  | 1691 | * reserving metadata here must not fail. | 
|  | 1692 | */ | 
|  | 1693 | BUG_ON(ret); | 
|  | 1694 |  | 
|  | 1695 | fill_stack_inode_item(trans, &delayed_node->inode_item, inode); | 
|  | 1696 | delayed_node->inode_dirty = 1; | 
|  | 1697 | delayed_node->count++; | 
|  | 1698 | atomic_inc(&root->fs_info->delayed_root->items); | 
|  | 1699 | release_node: | 
|  | 1700 | mutex_unlock(&delayed_node->mutex); | 
|  | 1701 | btrfs_release_delayed_node(delayed_node); | 
|  | 1702 | return ret; | 
|  | 1703 | } | 
|  | 1704 |  | 
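|  |  | /* | 
|  |  | * Drop everything the delayed node still holds: all pending insertion | 
|  |  | * and deletion items as well as the dirty inode item, releasing the | 
|  |  | * metadata space that was reserved for them. | 
|  |  | */ | 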
|  | 1705 | static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node) | 
|  | 1706 | { | 
|  | 1707 | struct btrfs_root *root = delayed_node->root; | 
|  | 1708 | struct btrfs_delayed_item *curr_item, *prev_item; | 
|  | 1709 |  | 
|  | 1710 | mutex_lock(&delayed_node->mutex); | 
|  | 1711 | curr_item = __btrfs_first_delayed_insertion_item(delayed_node); | 
|  | 1712 | while (curr_item) { | 
|  | 1713 | btrfs_delayed_item_release_metadata(root, curr_item); | 
|  | 1714 | prev_item = curr_item; | 
|  | 1715 | curr_item = __btrfs_next_delayed_item(prev_item); | 
|  | 1716 | btrfs_release_delayed_item(prev_item); | 
|  | 1717 | } | 
|  | 1718 |  | 
|  | 1719 | curr_item = __btrfs_first_delayed_deletion_item(delayed_node); | 
|  | 1720 | while (curr_item) { | 
|  | 1721 | btrfs_delayed_item_release_metadata(root, curr_item); | 
|  | 1722 | prev_item = curr_item; | 
|  | 1723 | curr_item = __btrfs_next_delayed_item(prev_item); | 
|  | 1724 | btrfs_release_delayed_item(prev_item); | 
|  | 1725 | } | 
|  | 1726 |  | 
|  | 1727 | if (delayed_node->inode_dirty) { | 
|  | 1728 | btrfs_delayed_inode_release_metadata(root, delayed_node); | 
|  | 1729 | btrfs_release_delayed_inode(delayed_node); | 
|  | 1730 | } | 
|  | 1731 | mutex_unlock(&delayed_node->mutex); | 
|  | 1732 | } | 
|  | 1733 |  | 
|  | 1734 | void btrfs_kill_delayed_inode_items(struct inode *inode) | 
|  | 1735 | { | 
|  | 1736 | struct btrfs_delayed_node *delayed_node; | 
|  | 1737 |  | 
|  | 1738 | delayed_node = btrfs_get_delayed_node(inode); | 
|  | 1739 | if (!delayed_node) | 
|  | 1740 | return; | 
|  | 1741 |  | 
|  | 1742 | __btrfs_kill_delayed_node(delayed_node); | 
|  | 1743 | btrfs_release_delayed_node(delayed_node); | 
|  | 1744 | } | 
|  | 1745 |  | 
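|  |  | /* | 
|  |  | * Kill the delayed nodes of all inodes of @root. The radix tree is | 
|  |  | * walked in batches of up to ARRAY_SIZE(delayed_nodes) nodes; each | 
|  |  | * batch is pinned with an extra reference under inode_lock and the | 
|  |  | * nodes are then killed outside the lock. | 
|  |  | */ | 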
|  | 1746 | void btrfs_kill_all_delayed_nodes(struct btrfs_root *root) | 
|  | 1747 | { | 
|  | 1748 | u64 inode_id = 0; | 
|  | 1749 | struct btrfs_delayed_node *delayed_nodes[8]; | 
|  | 1750 | int i, n; | 
|  | 1751 |  | 
|  | 1752 | while (1) { | 
|  | 1753 | spin_lock(&root->inode_lock); | 
|  | 1754 | n = radix_tree_gang_lookup(&root->delayed_nodes_tree, | 
|  | 1755 | (void **)delayed_nodes, inode_id, | 
|  | 1756 | ARRAY_SIZE(delayed_nodes)); | 
|  | 1757 | if (!n) { | 
|  | 1758 | spin_unlock(&root->inode_lock); | 
|  | 1759 | break; | 
|  | 1760 | } | 
|  | 1761 |  | 
|  | 1762 | inode_id = delayed_nodes[n - 1]->inode_id + 1; | 
|  | 1763 |  | 
|  | 1764 | for (i = 0; i < n; i++) | 
|  | 1765 | atomic_inc(&delayed_nodes[i]->refs); | 
|  | 1766 | spin_unlock(&root->inode_lock); | 
|  | 1767 |  | 
|  | 1768 | for (i = 0; i < n; i++) { | 
|  | 1769 | __btrfs_kill_delayed_node(delayed_nodes[i]); | 
|  | 1770 | btrfs_release_delayed_node(delayed_nodes[i]); | 
|  | 1771 | } | 
|  | 1772 | } | 
|  | 1773 | } |