/*
 *  linux/fs/hfsplus/bnode.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handle basic btree node operations
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/fs.h>
#include <linux/swap.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

/* Copy a specified range of bytes from the raw data of a node */
void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
{
	struct page **pagep;
	int l;

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_CACHE_SHIFT);
	off &= ~PAGE_CACHE_MASK;

	l = min(len, (int)PAGE_CACHE_SIZE - off);
	memcpy(buf, kmap(*pagep) + off, l);
	kunmap(*pagep);

	while ((len -= l) != 0) {
		buf += l;
		l = min(len, (int)PAGE_CACHE_SIZE);
		memcpy(buf, kmap(*++pagep), l);
		kunmap(*pagep);
	}
}

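/* Read a 16-bit big-endian value from a node and return it in CPU byte order */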
u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
{
	__be16 data;
	// optimize later...
	hfs_bnode_read(node, &data, off, 2);
	return be16_to_cpu(data);
}

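/* Read a single byte from the raw data of a node */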
u8 hfs_bnode_read_u8(struct hfs_bnode *node, int off)
{
	u8 data;
	// optimize later...
	hfs_bnode_read(node, &data, off, 1);
	return data;
}

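/*
 * Read a key from a node: leaf nodes and variable-size index keys carry
 * their length in the key itself, otherwise the tree's maximum key length
 * is used.
 */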
void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off)
{
	struct hfs_btree *tree;
	int key_len;

	tree = node->tree;
	if (node->type == HFS_NODE_LEAF ||
	    tree->attributes & HFS_TREE_VARIDXKEYS)
		key_len = hfs_bnode_read_u16(node, off) + 2;
	else
		key_len = tree->max_key_len + 2;

	hfs_bnode_read(node, key, off, key_len);
}

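/* Copy a range of bytes into the raw data of a node and dirty the pages */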
void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len)
{
	struct page **pagep;
	int l;

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_CACHE_SHIFT);
	off &= ~PAGE_CACHE_MASK;

	l = min(len, (int)PAGE_CACHE_SIZE - off);
	memcpy(kmap(*pagep) + off, buf, l);
	set_page_dirty(*pagep);
	kunmap(*pagep);

	while ((len -= l) != 0) {
		buf += l;
		l = min(len, (int)PAGE_CACHE_SIZE);
		memcpy(kmap(*++pagep), buf, l);
		set_page_dirty(*pagep);
		kunmap(*pagep);
	}
}

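/* Write a 16-bit value into a node in big-endian byte order */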
void hfs_bnode_write_u16(struct hfs_bnode *node, int off, u16 data)
{
	__be16 v = cpu_to_be16(data);
	// optimize later...
	hfs_bnode_write(node, &v, off, 2);
}

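/* Zero a range of bytes in the raw data of a node */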
void hfs_bnode_clear(struct hfs_bnode *node, int off, int len)
{
	struct page **pagep;
	int l;

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_CACHE_SHIFT);
	off &= ~PAGE_CACHE_MASK;

	l = min(len, (int)PAGE_CACHE_SIZE - off);
	memset(kmap(*pagep) + off, 0, l);
	set_page_dirty(*pagep);
	kunmap(*pagep);

	while ((len -= l) != 0) {
		l = min(len, (int)PAGE_CACHE_SIZE);
		memset(kmap(*++pagep), 0, l);
		set_page_dirty(*pagep);
		kunmap(*pagep);
	}
}

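/* Copy a range of bytes from one node to another, page by page */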
void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
		    struct hfs_bnode *src_node, int src, int len)
{
	struct hfs_btree *tree;
	struct page **src_page, **dst_page;
	int l;

	dprint(DBG_BNODE_MOD, "copybytes: %u,%u,%u\n", dst, src, len);
	if (!len)
		return;
	tree = src_node->tree;
	src += src_node->page_offset;
	dst += dst_node->page_offset;
	src_page = src_node->page + (src >> PAGE_CACHE_SHIFT);
	src &= ~PAGE_CACHE_MASK;
	dst_page = dst_node->page + (dst >> PAGE_CACHE_SHIFT);
	dst &= ~PAGE_CACHE_MASK;

	if (src == dst) {
		l = min(len, (int)PAGE_CACHE_SIZE - src);
		memcpy(kmap(*dst_page) + src, kmap(*src_page) + src, l);
		kunmap(*src_page);
		set_page_dirty(*dst_page);
		kunmap(*dst_page);

		while ((len -= l) != 0) {
			l = min(len, (int)PAGE_CACHE_SIZE);
			memcpy(kmap(*++dst_page), kmap(*++src_page), l);
			kunmap(*src_page);
			set_page_dirty(*dst_page);
			kunmap(*dst_page);
		}
	} else {
		void *src_ptr, *dst_ptr;

		do {
			src_ptr = kmap(*src_page) + src;
			dst_ptr = kmap(*dst_page) + dst;
			if (PAGE_CACHE_SIZE - src < PAGE_CACHE_SIZE - dst) {
				l = PAGE_CACHE_SIZE - src;
				src = 0;
				dst += l;
			} else {
				l = PAGE_CACHE_SIZE - dst;
				src += l;
				dst = 0;
			}
			l = min(len, l);
			memcpy(dst_ptr, src_ptr, l);
			kunmap(*src_page);
			set_page_dirty(*dst_page);
			kunmap(*dst_page);
			if (!dst)
				dst_page++;
			else
				src_page++;
		} while ((len -= l));
	}
}

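/*
 * Move a range of bytes within a node with memmove semantics, copying
 * backwards when the destination lies above the source.
 */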
void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
{
	struct page **src_page, **dst_page;
	int l;

	dprint(DBG_BNODE_MOD, "movebytes: %u,%u,%u\n", dst, src, len);
	if (!len)
		return;
	src += node->page_offset;
	dst += node->page_offset;
	if (dst > src) {
		src += len - 1;
		src_page = node->page + (src >> PAGE_CACHE_SHIFT);
		src = (src & ~PAGE_CACHE_MASK) + 1;
		dst += len - 1;
		dst_page = node->page + (dst >> PAGE_CACHE_SHIFT);
		dst = (dst & ~PAGE_CACHE_MASK) + 1;

		if (src == dst) {
			while (src < len) {
				memmove(kmap(*dst_page), kmap(*src_page), src);
				kunmap(*src_page);
				set_page_dirty(*dst_page);
				kunmap(*dst_page);
				len -= src;
				src = PAGE_CACHE_SIZE;
				src_page--;
				dst_page--;
			}
			src -= len;
			memmove(kmap(*dst_page) + src, kmap(*src_page) + src, len);
			kunmap(*src_page);
			set_page_dirty(*dst_page);
			kunmap(*dst_page);
		} else {
			void *src_ptr, *dst_ptr;

			do {
				src_ptr = kmap(*src_page) + src;
				dst_ptr = kmap(*dst_page) + dst;
				if (src < dst) {
					l = src;
					src = PAGE_CACHE_SIZE;
					dst -= l;
				} else {
					l = dst;
					src -= l;
					dst = PAGE_CACHE_SIZE;
				}
				l = min(len, l);
				memmove(dst_ptr - l, src_ptr - l, l);
				kunmap(*src_page);
				set_page_dirty(*dst_page);
				kunmap(*dst_page);
				if (dst == PAGE_CACHE_SIZE)
					dst_page--;
				else
					src_page--;
			} while ((len -= l));
		}
	} else {
		src_page = node->page + (src >> PAGE_CACHE_SHIFT);
		src &= ~PAGE_CACHE_MASK;
		dst_page = node->page + (dst >> PAGE_CACHE_SHIFT);
		dst &= ~PAGE_CACHE_MASK;

		if (src == dst) {
			l = min(len, (int)PAGE_CACHE_SIZE - src);
			memmove(kmap(*dst_page) + src, kmap(*src_page) + src, l);
			kunmap(*src_page);
			set_page_dirty(*dst_page);
			kunmap(*dst_page);

			while ((len -= l) != 0) {
				l = min(len, (int)PAGE_CACHE_SIZE);
				memmove(kmap(*++dst_page), kmap(*++src_page), l);
				kunmap(*src_page);
				set_page_dirty(*dst_page);
				kunmap(*dst_page);
			}
		} else {
			void *src_ptr, *dst_ptr;

			do {
				src_ptr = kmap(*src_page) + src;
				dst_ptr = kmap(*dst_page) + dst;
				if (PAGE_CACHE_SIZE - src < PAGE_CACHE_SIZE - dst) {
					l = PAGE_CACHE_SIZE - src;
					src = 0;
					dst += l;
				} else {
					l = PAGE_CACHE_SIZE - dst;
					src += l;
					dst = 0;
				}
				l = min(len, l);
				memmove(dst_ptr, src_ptr, l);
				kunmap(*src_page);
				set_page_dirty(*dst_page);
				kunmap(*dst_page);
				if (!dst)
					dst_page++;
				else
					src_page++;
			} while ((len -= l));
		}
	}
}

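/* Dump a node's descriptor and record offsets for debugging */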
void hfs_bnode_dump(struct hfs_bnode *node)
{
	struct hfs_bnode_desc desc;
	__be32 cnid;
	int i, off, key_off;

	dprint(DBG_BNODE_MOD, "bnode: %d\n", node->this);
	hfs_bnode_read(node, &desc, 0, sizeof(desc));
	dprint(DBG_BNODE_MOD, "%d, %d, %d, %d, %d\n",
		be32_to_cpu(desc.next), be32_to_cpu(desc.prev),
		desc.type, desc.height, be16_to_cpu(desc.num_recs));

	off = node->tree->node_size - 2;
	for (i = be16_to_cpu(desc.num_recs); i >= 0; off -= 2, i--) {
		key_off = hfs_bnode_read_u16(node, off);
		dprint(DBG_BNODE_MOD, " %d", key_off);
		if (i && node->type == HFS_NODE_INDEX) {
			int tmp;

			if (node->tree->attributes & HFS_TREE_VARIDXKEYS)
				tmp = hfs_bnode_read_u16(node, key_off) + 2;
			else
				tmp = node->tree->max_key_len + 2;
			dprint(DBG_BNODE_MOD, " (%d", tmp);
			hfs_bnode_read(node, &cnid, key_off + tmp, 4);
			dprint(DBG_BNODE_MOD, ",%d)", be32_to_cpu(cnid));
		} else if (i && node->type == HFS_NODE_LEAF) {
			int tmp;

			tmp = hfs_bnode_read_u16(node, key_off);
			dprint(DBG_BNODE_MOD, " (%d)", tmp);
		}
	}
	dprint(DBG_BNODE_MOD, "\n");
}

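/*
 * Unlink a node from the sibling chain of its level, fix up the tree's
 * leaf head/tail pointers and mark the node as deleted.
 */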
void hfs_bnode_unlink(struct hfs_bnode *node)
{
	struct hfs_btree *tree;
	struct hfs_bnode *tmp;
	__be32 cnid;

	tree = node->tree;
	if (node->prev) {
		tmp = hfs_bnode_find(tree, node->prev);
		if (IS_ERR(tmp))
			return;
		tmp->next = node->next;
		cnid = cpu_to_be32(tmp->next);
		hfs_bnode_write(tmp, &cnid, offsetof(struct hfs_bnode_desc, next), 4);
		hfs_bnode_put(tmp);
	} else if (node->type == HFS_NODE_LEAF)
		tree->leaf_head = node->next;

	if (node->next) {
		tmp = hfs_bnode_find(tree, node->next);
		if (IS_ERR(tmp))
			return;
		tmp->prev = node->prev;
		cnid = cpu_to_be32(tmp->prev);
		hfs_bnode_write(tmp, &cnid, offsetof(struct hfs_bnode_desc, prev), 4);
		hfs_bnode_put(tmp);
	} else if (node->type == HFS_NODE_LEAF)
		tree->leaf_tail = node->prev;

	// move down?
	if (!node->prev && !node->next) {
		printk(KERN_DEBUG "hfs_btree_del_level\n");
	}
	if (!node->parent) {
		tree->root = 0;
		tree->depth = 0;
	}
	set_bit(HFS_BNODE_DELETED, &node->flags);
}

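/* Hash a node number into a bucket of the tree's node hash table */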
static inline int hfs_bnode_hash(u32 num)
{
	num = (num >> 16) + num;
	num += num >> 8;
	return num & (NODE_HASH_SIZE - 1);
}

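/* Look up a node in the tree's hash table; returns NULL if it is not cached */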
struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *tree, u32 cnid)
{
	struct hfs_bnode *node;

	if (cnid >= tree->node_count) {
		printk(KERN_ERR "hfs: request for non-existent node %d in B*Tree\n", cnid);
		return NULL;
	}

	for (node = tree->node_hash[hfs_bnode_hash(cnid)];
	     node; node = node->next_hash) {
		if (node->this == cnid) {
			return node;
		}
	}
	return NULL;
}

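/*
 * Allocate an in-memory node, insert it into the tree's hash table and read
 * its backing pages from the page cache.  If another thread already hashed
 * the same node, that node is returned instead.
 */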
static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
{
	struct super_block *sb;
	struct hfs_bnode *node, *node2;
	struct address_space *mapping;
	struct page *page;
	int size, block, i, hash;
	loff_t off;

	if (cnid >= tree->node_count) {
		printk(KERN_ERR "hfs: request for non-existent node %d in B*Tree\n", cnid);
		return NULL;
	}

	sb = tree->inode->i_sb;
	size = sizeof(struct hfs_bnode) + tree->pages_per_bnode *
		sizeof(struct page *);
	node = kzalloc(size, GFP_KERNEL);
	if (!node)
		return NULL;
	node->tree = tree;
	node->this = cnid;
	set_bit(HFS_BNODE_NEW, &node->flags);
	atomic_set(&node->refcnt, 1);
	dprint(DBG_BNODE_REFS, "new_node(%d:%d): 1\n",
	       node->tree->cnid, node->this);
	init_waitqueue_head(&node->lock_wq);
	spin_lock(&tree->hash_lock);
	node2 = hfs_bnode_findhash(tree, cnid);
	if (!node2) {
		hash = hfs_bnode_hash(cnid);
		node->next_hash = tree->node_hash[hash];
		tree->node_hash[hash] = node;
		tree->node_hash_cnt++;
	} else {
		spin_unlock(&tree->hash_lock);
		kfree(node);
		wait_event(node2->lock_wq, !test_bit(HFS_BNODE_NEW, &node2->flags));
		return node2;
	}
	spin_unlock(&tree->hash_lock);

	mapping = tree->inode->i_mapping;
	off = (loff_t)cnid << tree->node_size_shift;
	block = off >> PAGE_CACHE_SHIFT;
	node->page_offset = off & ~PAGE_CACHE_MASK;
	for (i = 0; i < tree->pages_per_bnode; block++, i++) {
		page = read_mapping_page(mapping, block, NULL);
		if (IS_ERR(page))
			goto fail;
		if (PageError(page)) {
			page_cache_release(page);
			goto fail;
		}
		page_cache_release(page);
		node->page[i] = page;
	}

	return node;
fail:
	set_bit(HFS_BNODE_ERROR, &node->flags);
	return node;
}

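/* Remove a node from the tree's hash table */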
void hfs_bnode_unhash(struct hfs_bnode *node)
{
	struct hfs_bnode **p;

	dprint(DBG_BNODE_REFS, "remove_node(%d:%d): %d\n",
		node->tree->cnid, node->this, atomic_read(&node->refcnt));
	for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)];
	     *p && *p != node; p = &(*p)->next_hash)
		;
	BUG_ON(!*p);
	*p = node->next_hash;
	node->tree->node_hash_cnt--;
}

/* Load a particular node out of a tree */
struct hfs_bnode *hfs_bnode_find(struct hfs_btree *tree, u32 num)
{
	struct hfs_bnode *node;
	struct hfs_bnode_desc *desc;
	int i, rec_off, off, next_off;
	int entry_size, key_size;

	spin_lock(&tree->hash_lock);
	node = hfs_bnode_findhash(tree, num);
	if (node) {
		hfs_bnode_get(node);
		spin_unlock(&tree->hash_lock);
		wait_event(node->lock_wq, !test_bit(HFS_BNODE_NEW, &node->flags));
		if (test_bit(HFS_BNODE_ERROR, &node->flags))
			goto node_error;
		return node;
	}
	spin_unlock(&tree->hash_lock);
	node = __hfs_bnode_create(tree, num);
	if (!node)
		return ERR_PTR(-ENOMEM);
	if (test_bit(HFS_BNODE_ERROR, &node->flags))
		goto node_error;
	if (!test_bit(HFS_BNODE_NEW, &node->flags))
		return node;

	desc = (struct hfs_bnode_desc *)(kmap(node->page[0]) + node->page_offset);
	node->prev = be32_to_cpu(desc->prev);
	node->next = be32_to_cpu(desc->next);
	node->num_recs = be16_to_cpu(desc->num_recs);
	node->type = desc->type;
	node->height = desc->height;
	kunmap(node->page[0]);

	switch (node->type) {
	case HFS_NODE_HEADER:
	case HFS_NODE_MAP:
		if (node->height != 0)
			goto node_error;
		break;
	case HFS_NODE_LEAF:
		if (node->height != 1)
			goto node_error;
		break;
	case HFS_NODE_INDEX:
		if (node->height <= 1 || node->height > tree->depth)
			goto node_error;
		break;
	default:
		goto node_error;
	}

	rec_off = tree->node_size - 2;
	off = hfs_bnode_read_u16(node, rec_off);
	if (off != sizeof(struct hfs_bnode_desc))
		goto node_error;
	for (i = 1; i <= node->num_recs; off = next_off, i++) {
		rec_off -= 2;
		next_off = hfs_bnode_read_u16(node, rec_off);
		if (next_off <= off ||
		    next_off > tree->node_size ||
		    next_off & 1)
			goto node_error;
		entry_size = next_off - off;
		if (node->type != HFS_NODE_INDEX &&
		    node->type != HFS_NODE_LEAF)
			continue;
		key_size = hfs_bnode_read_u16(node, off) + 2;
		if (key_size >= entry_size || key_size & 1)
			goto node_error;
	}
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);
	return node;

node_error:
	set_bit(HFS_BNODE_ERROR, &node->flags);
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);
	hfs_bnode_put(node);
	return ERR_PTR(-EIO);
}

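/* Free the in-memory representation of a node */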
void hfs_bnode_free(struct hfs_bnode *node)
{
	//int i;

	//for (i = 0; i < node->tree->pages_per_bnode; i++)
	//	if (node->page[i])
	//		page_cache_release(node->page[i]);
	kfree(node);
}

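/* Create a new node in the tree and zero its contents */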
struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
{
	struct hfs_bnode *node;
	struct page **pagep;
	int i;

	spin_lock(&tree->hash_lock);
	node = hfs_bnode_findhash(tree, num);
	spin_unlock(&tree->hash_lock);
	if (node) {
		printk(KERN_CRIT "new node %u already hashed?\n", num);
		WARN_ON(1);
		return node;
	}
	node = __hfs_bnode_create(tree, num);
	if (!node)
		return ERR_PTR(-ENOMEM);
	if (test_bit(HFS_BNODE_ERROR, &node->flags)) {
		hfs_bnode_put(node);
		return ERR_PTR(-EIO);
	}

	pagep = node->page;
	memset(kmap(*pagep) + node->page_offset, 0,
	       min((int)PAGE_CACHE_SIZE, (int)tree->node_size));
	set_page_dirty(*pagep);
	kunmap(*pagep);
	for (i = 1; i < tree->pages_per_bnode; i++) {
		memset(kmap(*++pagep), 0, PAGE_CACHE_SIZE);
		set_page_dirty(*pagep);
		kunmap(*pagep);
	}
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);

	return node;
}

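/* Take a reference on a node */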
void hfs_bnode_get(struct hfs_bnode *node)
{
	if (node) {
		atomic_inc(&node->refcnt);
		dprint(DBG_BNODE_REFS, "get_node(%d:%d): %d\n",
		       node->tree->cnid, node->this, atomic_read(&node->refcnt));
	}
}

/* Dispose of resources used by a node */
void hfs_bnode_put(struct hfs_bnode *node)
{
	if (node) {
		struct hfs_btree *tree = node->tree;
		int i;

		dprint(DBG_BNODE_REFS, "put_node(%d:%d): %d\n",
		       node->tree->cnid, node->this, atomic_read(&node->refcnt));
		BUG_ON(!atomic_read(&node->refcnt));
		if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock))
			return;
		for (i = 0; i < tree->pages_per_bnode; i++) {
			if (!node->page[i])
				continue;
			mark_page_accessed(node->page[i]);
		}

		if (test_bit(HFS_BNODE_DELETED, &node->flags)) {
			hfs_bnode_unhash(node);
			spin_unlock(&tree->hash_lock);
			hfs_bmap_free(node);
			hfs_bnode_free(node);
			return;
		}
		spin_unlock(&tree->hash_lock);
	}
}