/*
 * Copyright (C) 2011 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#ifndef DM_BTREE_INTERNAL_H
#define DM_BTREE_INTERNAL_H

#include "dm-btree.h"

/*----------------------------------------------------------------*/

/*
 * We'll need 2 accessor functions for n->csum and n->blocknr
 * to support dm-btree-spine.c in that case.
 */

enum node_flags {
	INTERNAL_NODE = 1,
	LEAF_NODE = 1 << 1
};
/*
 * Every btree node begins with this structure.  Make sure it's a multiple
 * of 8 bytes in size, otherwise the 64-bit keys will be mis-aligned.
 */
struct node_header {
	__le32 csum;
	__le32 flags;
	__le64 blocknr; /* Block this node is supposed to live in. */

	__le32 nr_entries;
	__le32 max_entries;
	__le32 value_size;
	__le32 padding;
} __packed;

struct btree_node {
	struct node_header header;
	__le64 keys[0];
} __packed;
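
/*
 * On disk a node is laid out as the header, then an array of max_entries
 * keys, then the packed values; this is why value_base()/value_ptr()
 * below compute offsets from &keys[max_entries].  A rough sketch of
 * reading the header and checking the node type ('n' is assumed to point
 * at a node already fetched through the block manager; internal nodes
 * store child block numbers as their values):
 *
 *	uint32_t flags = le32_to_cpu(n->header.flags);
 *	uint32_t nr = le32_to_cpu(n->header.nr_entries);
 *
 *	if (flags & INTERNAL_NODE)
 *		... keys[0..nr) index child block numbers ...
 *	else
 *		... LEAF_NODE: keys[0..nr) index caller-supplied values ...
 */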

void inc_children(struct dm_transaction_manager *tm, struct btree_node *n,
		  struct dm_btree_value_type *vt);

int new_block(struct dm_btree_info *info, struct dm_block **result);
int unlock_block(struct dm_btree_info *info, struct dm_block *b);

/*
 * Spines keep track of the rolling locks.  There are 2 variants, read-only
 * and one that uses shadowing.  These are separate structs to allow the
 * type checker to spot misuse, for example accidentally calling read_lock
 * on a shadow spine.
 */
struct ro_spine {
	struct dm_btree_info *info;

	int count;
	struct dm_block *nodes[2];
};

void init_ro_spine(struct ro_spine *s, struct dm_btree_info *info);
int exit_ro_spine(struct ro_spine *s);
int ro_step(struct ro_spine *s, dm_block_t new_child);
struct btree_node *ro_node(struct ro_spine *s);
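
/*
 * A rough usage sketch of the read-only spine helpers above; error
 * handling is abbreviated, and 'info' and the on-disk 'root' block number
 * are assumed to come from the caller.  Each ro_step() read-locks the next
 * node while the spine releases the oldest of the (at most two) locks it
 * holds, giving the "rolling" behaviour described above.
 *
 *	struct ro_spine spine;
 *	int r;
 *
 *	init_ro_spine(&spine, info);
 *	r = ro_step(&spine, root);
 *	if (!r) {
 *		struct btree_node *n = ro_node(&spine);
 *		... inspect n, then ro_step() again to descend ...
 *	}
 *	exit_ro_spine(&spine);
 */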

struct shadow_spine {
	struct dm_btree_info *info;

	int count;
	struct dm_block *nodes[2];

	dm_block_t root;
};

void init_shadow_spine(struct shadow_spine *s, struct dm_btree_info *info);
int exit_shadow_spine(struct shadow_spine *s);

int shadow_step(struct shadow_spine *s, dm_block_t b,
		struct dm_btree_value_type *vt);

/*
 * The spine must have at least one entry before calling this.
 */
struct dm_block *shadow_current(struct shadow_spine *s);

/*
 * The spine must have at least two entries before calling this.
 */
struct dm_block *shadow_parent(struct shadow_spine *s);

int shadow_has_parent(struct shadow_spine *s);

int shadow_root(struct shadow_spine *s);
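
/*
 * A rough usage sketch of the shadow spine helpers above, as used when
 * walking down the tree to modify a leaf; error handling is abbreviated,
 * and 'info', 'vt' and 'root' are assumed to come from the caller.
 * shadow_parent() only becomes valid once two levels have been stepped
 * (see shadow_has_parent()), and shadow_root() reports the possibly new
 * root block once the walk has begun.
 *
 *	struct shadow_spine spine;
 *	int r;
 *
 *	init_shadow_spine(&spine, info);
 *	r = shadow_step(&spine, root, vt);
 *	if (!r) {
 *		struct dm_block *b = shadow_current(&spine);
 *		... modify the shadowed node, shadow_step() to descend ...
 *	}
 *	exit_shadow_spine(&spine);
 */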

/*
 * Some inlines.
 */
static inline __le64 *key_ptr(struct btree_node *n, uint32_t index)
{
	return n->keys + index;
}

static inline void *value_base(struct btree_node *n)
{
	return &n->keys[le32_to_cpu(n->header.max_entries)];
}

static inline void *value_ptr(struct btree_node *n, uint32_t index)
{
	uint32_t value_size = le32_to_cpu(n->header.value_size);
	return value_base(n) + (value_size * index);
}

/*
 * Assumes the values are suitably-aligned and converts to core format.
 */
static inline uint64_t value64(struct btree_node *n, uint32_t index)
{
	__le64 *values_le = value_base(n);

	return le64_to_cpu(values_le[index]);
}
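
/*
 * Sketch of reading entry 'i' from a node whose values are 64-bit, for
 * example an internal node where each value is a child block number; for
 * other value types value_ptr() plus a copy of header.value_size bytes
 * would be used instead.  Bounds checking against nr_entries is assumed
 * to have been done by the caller.
 *
 *	uint64_t key = le64_to_cpu(*key_ptr(n, i));
 *	uint64_t val = value64(n, i);
 */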

/*
 * Searching for a key within a single node.
 */
int lower_bound(struct btree_node *n, uint64_t key);
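
/*
 * Sketch of a single-node lookup with lower_bound(), assuming it returns
 * the index of the largest key that is <= 'key', or a negative value when
 * every key in the node is greater.
 *
 *	int i = lower_bound(n, key);
 *
 *	if (i >= 0 && le64_to_cpu(*key_ptr(n, i)) == key)
 *		... exact match: the value lives at value_ptr(n, i) ...
 */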

extern struct dm_block_validator btree_node_validator;

#endif	/* DM_BTREE_INTERNAL_H */