/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"

/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */

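/*
 * All delayed refs for a transaction live in one rb tree rooted at
 * delayed_refs->root.  Every extent gets a single "head" node
 * (struct btrfs_delayed_ref_head) that carries the summed ref_mod and
 * per-extent state; the individual tree and data ref nodes describing
 * the backrefs being added or dropped sort next to it under the same
 * bytenr.
 */
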
/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
			  struct btrfs_delayed_tree_ref *ref1)
{
	if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
			  struct btrfs_delayed_data_ref *ref1)
{
	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
		if (ref1->objectid < ref2->objectid)
			return -1;
		if (ref1->objectid > ref2->objectid)
			return 1;
		if (ref1->offset < ref2->offset)
			return -1;
		if (ref1->offset > ref2->offset)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * entries in the rb tree are ordered by the byte number of the extent,
 * type of the delayed backrefs and content of delayed backrefs.
 */
static int comp_entry(struct btrfs_delayed_ref_node *ref2,
		      struct btrfs_delayed_ref_node *ref1)
{
	if (ref1->bytenr < ref2->bytenr)
		return -1;
	if (ref1->bytenr > ref2->bytenr)
		return 1;
	if (ref1->is_head && ref2->is_head)
		return 0;
	if (ref2->is_head)
		return -1;
	if (ref1->is_head)
		return 1;
	if (ref1->type < ref2->type)
		return -1;
	if (ref1->type > ref2->type)
		return 1;
	/* merging of sequenced refs is not allowed */
	if (ref1->seq < ref2->seq)
		return -1;
	if (ref1->seq > ref2->seq)
		return 1;
	if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) {
		return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2),
				      btrfs_delayed_node_to_tree_ref(ref1));
	} else if (ref1->type == BTRFS_EXTENT_DATA_REF_KEY ||
		   ref1->type == BTRFS_SHARED_DATA_REF_KEY) {
		return comp_data_refs(btrfs_delayed_node_to_data_ref(ref2),
				      btrfs_delayed_node_to_data_ref(ref1));
	}
	BUG();
	return 0;
}

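/*
 * Effective sort key used above: bytenr first; within one bytenr the
 * individual tree/data refs sort before the head node, and the refs
 * themselves are ordered by type, then seq, then the per-type fields
 * compared by comp_tree_refs or comp_data_refs.  Refs with different
 * seq values never compare equal, so sequenced refs are not merged
 * on insert.
 */
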
/*
 * insert a new ref into the rbtree.  This returns the existing ref that
 * compares equal to the new one (same bytenr, type and per-ref fields),
 * or NULL if the new node was inserted.
 */
static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root,
						  struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_node *entry;
	struct btrfs_delayed_ref_node *ins;
	int cmp;

	ins = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
				 rb_node);

		cmp = comp_entry(entry, ins);
		if (cmp < 0)
			p = &(*p)->rb_left;
		else if (cmp > 0)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * find a head entry based on bytenr. This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot.
 * If return_bigger is given, the next bigger entry is returned if no exact
 * match is found.
 */
static struct btrfs_delayed_ref_node *find_ref_head(struct rb_root *root,
				  u64 bytenr,
				  struct btrfs_delayed_ref_node **last,
				  int return_bigger)
{
	struct rb_node *n;
	struct btrfs_delayed_ref_node *entry;
	int cmp = 0;

again:
	n = root->rb_node;
	entry = NULL;
	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
		WARN_ON(!entry->in_tree);
		if (last)
			*last = entry;

		if (bytenr < entry->bytenr)
			cmp = -1;
		else if (bytenr > entry->bytenr)
			cmp = 1;
		else if (!btrfs_delayed_ref_is_head(entry))
			cmp = 1;
		else
			cmp = 0;

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return entry;
	}
	if (entry && return_bigger) {
		if (cmp > 0) {
			n = rb_next(&entry->rb_node);
			if (!n)
				n = rb_first(root);
			entry = rb_entry(n, struct btrfs_delayed_ref_node,
					 rb_node);
			bytenr = entry->bytenr;
			return_bigger = 0;
			goto again;
		}
		return entry;
	}
	return NULL;
}

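/*
 * try to take the mutex on a delayed ref head.  The caller must already
 * hold delayed_refs->lock.  If the mutex is contended we temporarily drop
 * the spinlock, sleep on the mutex and then retake the spinlock; in that
 * case -EAGAIN is returned when the head was removed from the tree while
 * we slept, and the caller has to look the head up again.
 *
 * Illustrative caller sketch only (the real call sites live in
 * extent-tree.c):
 *
 *	spin_lock(&delayed_refs->lock);
 *	head = btrfs_find_delayed_ref_head(trans, bytenr);
 *	if (head && !btrfs_delayed_ref_lock(trans, head)) {
 *		... process the head, head->mutex is held ...
 *		mutex_unlock(&head->mutex);
 *	}
 *	spin_unlock(&delayed_refs->lock);
 */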
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	assert_spin_locked(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	atomic_inc(&head->node.refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (!head->node.in_tree) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(&head->node);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref(&head->node);
	return 0;
}

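/*
 * check whether delayed ref processing for the given seq has to be held
 * back.  Returns 1 if a seq_list entry with a lower or equal seq is still
 * registered on delayed_refs->seq_head (e.g. a pending backref walk that
 * needs a stable view of the delayed refs), 0 otherwise.  Must be called
 * with delayed_refs->lock held.
 */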
int btrfs_check_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs,
			    u64 seq)
{
	struct seq_list *elem;

	assert_spin_locked(&delayed_refs->lock);
	if (list_empty(&delayed_refs->seq_head))
		return 0;

	elem = list_first_entry(&delayed_refs->seq_head, struct seq_list, list);
	if (seq >= elem->seq) {
		pr_debug("holding back delayed_ref %llu, lowest is %llu (%p)\n",
			 seq, elem->seq, delayed_refs);
		return 1;
	}
	return 0;
}

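/*
 * collect a cluster of delayed ref heads for processing.  Starting at
 * 'start' (or at the beginning of the rbtree when start == 0), up to 32
 * heads that are not already on someone else's cluster list are added to
 * 'cluster'.  Returns 0 if at least one head was added, 1 if there was
 * nothing left to do.  Must be called with delayed_refs->lock held.
 */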
int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
			   struct list_head *cluster, u64 start)
{
	int count = 0;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct rb_node *node;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_head *head;

	delayed_refs = &trans->transaction->delayed_refs;
	if (start == 0) {
		node = rb_first(&delayed_refs->root);
	} else {
		ref = NULL;
		find_ref_head(&delayed_refs->root, start + 1, &ref, 1);
		if (ref) {
			node = &ref->rb_node;
		} else
			node = rb_first(&delayed_refs->root);
	}
again:
	while (node && count < 32) {
		ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
		if (btrfs_delayed_ref_is_head(ref)) {
			head = btrfs_delayed_node_to_head(ref);
			if (list_empty(&head->cluster)) {
				list_add_tail(&head->cluster, cluster);
				delayed_refs->run_delayed_start =
					head->node.bytenr;
				count++;

				WARN_ON(delayed_refs->num_heads_ready == 0);
				delayed_refs->num_heads_ready--;
			} else if (count) {
				/* the goal of the clustering is to find extents
				 * that are likely to end up in the same extent
				 * leaf on disk.  So, we don't want them spread
				 * all over the tree.  Stop now if we've hit
				 * a head that was already in use
				 */
				break;
			}
		}
		node = rb_next(node);
	}
	if (count) {
		return 0;
	} else if (start) {
		/*
		 * we've gone to the end of the rbtree without finding any
		 * clusters.  start from the beginning and try again
		 */
		start = 0;
		node = rb_first(&delayed_refs->root);
		goto again;
	}
	return 1;
}

/*
 * helper function to update an extent delayed ref in the
 * rbtree.  existing and update must both have the same
 * bytenr and parent
 *
 * This may free existing if the update cancels out whatever
 * operation it was doing.
 */
static noinline void
update_existing_ref(struct btrfs_trans_handle *trans,
		    struct btrfs_delayed_ref_root *delayed_refs,
		    struct btrfs_delayed_ref_node *existing,
		    struct btrfs_delayed_ref_node *update)
{
	if (update->action != existing->action) {
		/*
		 * this is effectively undoing either an add or a
		 * drop.  We decrement the ref_mod, and if it goes
		 * down to zero we just delete the entry without
		 * ever changing the extent allocation tree.
		 */
		existing->ref_mod--;
		if (existing->ref_mod == 0) {
			rb_erase(&existing->rb_node,
				 &delayed_refs->root);
			existing->in_tree = 0;
			btrfs_put_delayed_ref(existing);
			delayed_refs->num_entries--;
			if (trans->delayed_ref_updates)
				trans->delayed_ref_updates--;
		} else {
			WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
				existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
		}
	} else {
		WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
			existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
		/*
		 * the action on the existing ref matches
		 * the action on the ref we're trying to add.
		 * Bump the ref_mod by one so the backref that
		 * is eventually added/removed has the correct
		 * reference count
		 */
		existing->ref_mod += update->ref_mod;
	}
}

/*
 * helper function to update the accounting in the head ref
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
			 struct btrfs_delayed_ref_node *update)
{
	struct btrfs_delayed_ref_head *existing_ref;
	struct btrfs_delayed_ref_head *ref;

	existing_ref = btrfs_delayed_node_to_head(existing);
	ref = btrfs_delayed_node_to_head(update);
	BUG_ON(existing_ref->is_data != ref->is_data);

	if (ref->must_insert_reserved) {
		/* if the extent was freed and then
		 * reallocated before the delayed ref
		 * entries were processed, we can end up
		 * with an existing head ref without
		 * the must_insert_reserved flag set.
		 * Set it again here
		 */
		existing_ref->must_insert_reserved = ref->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;

	}

	if (ref->extent_op) {
		if (!existing_ref->extent_op) {
			existing_ref->extent_op = ref->extent_op;
		} else {
			if (ref->extent_op->update_key) {
				memcpy(&existing_ref->extent_op->key,
				       &ref->extent_op->key,
				       sizeof(ref->extent_op->key));
				existing_ref->extent_op->update_key = 1;
			}
			if (ref->extent_op->update_flags) {
				existing_ref->extent_op->flags_to_set |=
					ref->extent_op->flags_to_set;
				existing_ref->extent_op->update_flags = 1;
			}
			kfree(ref->extent_op);
		}
	}
	/*
	 * update the reference mod on the head to reflect this new operation
	 */
	existing->ref_mod += update->ref_mod;
}

/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline void add_delayed_ref_head(struct btrfs_fs_info *fs_info,
					struct btrfs_trans_handle *trans,
					struct btrfs_delayed_ref_node *ref,
					u64 bytenr, u64 num_bytes,
					int action, int is_data)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_ref_head *head_ref = NULL;
	struct btrfs_delayed_ref_root *delayed_refs;
	int count_mod = 1;
	int must_insert_reserved = 0;

	/*
	 * the head node stores the sum of all the mods, so dropping a ref
	 * should drop the sum in the head node by one.
	 */
	if (action == BTRFS_UPDATE_DELAYED_HEAD)
		count_mod = 0;
	else if (action == BTRFS_DROP_DELAYED_REF)
		count_mod = -1;

	/*
	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update
	 * the reserved accounting when the extent is finally added, or
	 * if a later modification deletes the delayed ref without ever
	 * inserting the extent into the extent allocation tree.
	 * ref->must_insert_reserved is the flag used to record
	 * that accounting mods are required.
	 *
	 * Once we record must_insert_reserved, switch the action to
	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT)
		must_insert_reserved = 1;
	else
		must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = count_mod;
	ref->type  = 0;
	ref->action  = 0;
	ref->is_head = 1;
	ref->in_tree = 1;
	ref->seq = 0;

	head_ref = btrfs_delayed_node_to_head(ref);
	head_ref->must_insert_reserved = must_insert_reserved;
	head_ref->is_data = is_data;

	INIT_LIST_HEAD(&head_ref->cluster);
	mutex_init(&head_ref->mutex);

	trace_btrfs_delayed_ref_head(ref, head_ref, action);

	existing = tree_insert(&delayed_refs->root, &ref->rb_node);

	if (existing) {
		update_existing_head_ref(existing, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kfree(head_ref);
	} else {
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
		delayed_refs->num_entries++;
		trans->delayed_ref_updates++;
	}
}

/*
 * helper to insert a delayed tree ref into the rbtree.
 */
static noinline void add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
					 struct btrfs_trans_handle *trans,
					 struct btrfs_delayed_ref_node *ref,
					 u64 bytenr, u64 num_bytes, u64 parent,
					 u64 ref_root, int level, int action,
					 int for_cow)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_tree_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;

	if (need_ref_seq(for_cow, ref_root))
		seq = inc_delayed_seq(delayed_refs);
	ref->seq = seq;

	full_ref = btrfs_delayed_node_to_tree_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
	else
		ref->type = BTRFS_TREE_BLOCK_REF_KEY;
	full_ref->level = level;

	trace_btrfs_delayed_tree_ref(ref, full_ref, action);

	existing = tree_insert(&delayed_refs->root, &ref->rb_node);

	if (existing) {
		update_existing_ref(trans, delayed_refs, existing, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kfree(full_ref);
	} else {
		delayed_refs->num_entries++;
		trans->delayed_ref_updates++;
	}
}

/*
 * helper to insert a delayed data ref into the rbtree.
 */
static noinline void add_delayed_data_ref(struct btrfs_fs_info *fs_info,
					 struct btrfs_trans_handle *trans,
					 struct btrfs_delayed_ref_node *ref,
					 u64 bytenr, u64 num_bytes, u64 parent,
					 u64 ref_root, u64 owner, u64 offset,
					 int action, int for_cow)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_data_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;

	if (need_ref_seq(for_cow, ref_root))
		seq = inc_delayed_seq(delayed_refs);
	ref->seq = seq;

	full_ref = btrfs_delayed_node_to_data_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_DATA_REF_KEY;
	else
		ref->type = BTRFS_EXTENT_DATA_REF_KEY;

	full_ref->objectid = owner;
	full_ref->offset = offset;

	trace_btrfs_delayed_data_ref(ref, full_ref, action);

	existing = tree_insert(&delayed_refs->root, &ref->rb_node);

	if (existing) {
		update_existing_ref(trans, delayed_refs, existing, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kfree(full_ref);
	} else {
		delayed_refs->num_entries++;
		trans->delayed_ref_updates++;
	}
}

/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int for_cow)
{
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	BUG_ON(extent_op && extent_op->is_data);
	ref = kmalloc(sizeof(*ref), GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
	if (!head_ref) {
		kfree(ref);
		return -ENOMEM;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
				   num_bytes, action, 0);

	add_delayed_tree_ref(fs_info, trans, &ref->node, bytenr,
				   num_bytes, parent, ref_root, level, action,
				   for_cow);
	if (!need_ref_seq(for_cow, ref_root) &&
	    waitqueue_active(&delayed_refs->seq_wait))
		wake_up(&delayed_refs->seq_wait);
	spin_unlock(&delayed_refs->lock);
	return 0;
}

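/*
 * Illustrative example only (not a real call site): when the allocator
 * hands out a new tree block it queues the matching backref insertion
 * roughly like this, with the values taken from the block being created:
 *
 *	ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
 *					 buf->start, buf->len,
 *					 parent, root_objectid, level,
 *					 BTRFS_ADD_DELAYED_EXTENT,
 *					 extent_op, for_cow);
 *
 * The actual callers live in extent-tree.c; the names above are only
 * meant to show how the parameters map onto an allocation.
 */
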
/*
 * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int for_cow)
{
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	BUG_ON(extent_op && !extent_op->is_data);
	ref = kmalloc(sizeof(*ref), GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
	if (!head_ref) {
		kfree(ref);
		return -ENOMEM;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
				   num_bytes, action, 1);

	add_delayed_data_ref(fs_info, trans, &ref->node, bytenr,
				   num_bytes, parent, ref_root, owner, offset,
				   action, for_cow);
	if (!need_ref_seq(for_cow, ref_root) &&
	    waitqueue_active(&delayed_refs->seq_wait))
		wake_up(&delayed_refs->seq_wait);
	spin_unlock(&delayed_refs->lock);
	return 0;
}

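/*
 * record an extent_op (a pending key or flags update) against the head
 * for the given extent without adding or dropping any references.  The
 * head is inserted (or merged into an existing one) with the
 * BTRFS_UPDATE_DELAYED_HEAD action, which leaves ref_mod untouched.
 */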
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
				struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
				   num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
				   extent_op->is_data);

	if (waitqueue_active(&delayed_refs->seq_wait))
		wake_up(&delayed_refs->seq_wait);
	spin_unlock(&delayed_refs->lock);
	return 0;
}

/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
{
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	ref = find_ref_head(&delayed_refs->root, bytenr, NULL, 0);
	if (ref)
		return btrfs_delayed_node_to_head(ref);
	return NULL;
}