/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#ifndef __DELAYED_REF__
#define __DELAYED_REF__

/* these are the possible values of struct btrfs_delayed_ref->action */
#define BTRFS_ADD_DELAYED_REF     1 /* add one backref to the tree */
#define BTRFS_DROP_DELAYED_REF    2 /* delete one backref from the tree */
#define BTRFS_ADD_DELAYED_EXTENT  3 /* record a full extent allocation */
#define BTRFS_UPDATE_DELAYED_HEAD 4 /* not changing ref count on head ref */

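/*
 * Editor's illustration (not in the original source): taking one more
 * reference on an existing extent (e.g. when a snapshot starts sharing a
 * tree block) queues BTRFS_ADD_DELAYED_REF, a brand new allocation queues
 * BTRFS_ADD_DELAYED_EXTENT so the extent item itself gets inserted when
 * the ref is run, and freeing a reference queues BTRFS_DROP_DELAYED_REF.
 */
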
struct btrfs_delayed_ref_node {
        struct rb_node rb_node;

        /* the starting bytenr of the extent */
        u64 bytenr;

        /* the size of the extent */
        u64 num_bytes;

        /* seq number to keep track of insertion order */
        u64 seq;

        /* ref count on this data structure */
        atomic_t refs;

        /*
         * how many refs this entry is adding or deleting.  For
         * head refs, this may be a negative number because it is keeping
         * track of the total mods done to the reference count.
         * For individual refs, this will always be a positive number.
         *
         * It may be more than one, since it is possible for a single
         * parent to have more than one ref on an extent.
         */
        int ref_mod;
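        /*
         * Worked example (editor's illustration): two BTRFS_ADD_DELAYED_REF
         * entries (+1 each) and one BTRFS_DROP_DELAYED_REF entry (-1) queued
         * against the same extent leave the head ref with ref_mod == 1,
         * i.e. one net increment of the on-disk reference count once the
         * refs are run.
         */
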
        unsigned int action:8;
        unsigned int type:8;
        /* is this the top-level head for a group of refs? */
        unsigned int is_head:1;
        /* is this node still in the rbtree? */
        unsigned int in_tree:1;
};

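/*
 * (Editor's note) An extent_op records metadata updates to apply to the
 * extent item when the delayed ref is run: optionally a new key and/or
 * flags to set, plus whether the extent holds data or tree blocks.
 */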
struct btrfs_delayed_extent_op {
        struct btrfs_disk_key key;
        u64 flags_to_set;
        unsigned int update_key:1;
        unsigned int update_flags:1;
        unsigned int is_data:1;
};

/*
 * the head refs are used to hold a lock on a given extent, which allows us
 * to make sure that only one process is running the delayed refs
 * at a time for a single extent. They also store the sum of all the
 * reference count modifications we've queued up.
 */
struct btrfs_delayed_ref_head {
        struct btrfs_delayed_ref_node node;

        /*
         * the mutex is held while running the refs, and it is also
         * held when checking the sum of reference modifications.
         */
        struct mutex mutex;

        struct list_head cluster;

        struct btrfs_delayed_extent_op *extent_op;
        /*
         * when a new extent is allocated, it is just reserved in memory.
         * The actual extent isn't inserted into the extent allocation tree
         * until the delayed ref is processed. must_insert_reserved is
         * used to flag a delayed ref so the accounting can be updated
         * when a full insert is done.
         *
         * It is possible the extent will be freed before it is ever
         * inserted into the extent allocation tree. In this case
         * we need to update the in-RAM accounting to properly reflect
         * that the free has happened.
         */
        unsigned int must_insert_reserved:1;
        unsigned int is_data:1;
};

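/*
 * Illustrative locking pattern (editor's sketch; the real consumer is the
 * delayed ref processing in extent-tree.c, and the 0-on-success return of
 * btrfs_delayed_ref_lock is assumed here):
 *
 *        if (btrfs_delayed_ref_lock(trans, head))
 *                continue;  // someone else ran this head meanwhile
 *        ... run the refs queued behind this head ...
 *        mutex_unlock(&head->mutex);
 */
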
struct btrfs_delayed_tree_ref {
        struct btrfs_delayed_ref_node node;
        u64 root;
        u64 parent;
        int level;
};

struct btrfs_delayed_data_ref {
        struct btrfs_delayed_ref_node node;
        u64 root;
        u64 parent;
        u64 objectid;
        u64 offset;
};

struct btrfs_delayed_ref_root {
        struct rb_root root;

        /* this spin lock protects the rbtree and the entries inside */
        spinlock_t lock;

        /* how many delayed ref updates we've queued, used by the
         * throttling code
         */
        unsigned long num_entries;

        /* total number of head nodes in tree */
        unsigned long num_heads;

        /* total number of head nodes ready for processing */
        unsigned long num_heads_ready;

        /*
         * set when the tree is flushing before a transaction commit,
         * used by the throttling code to decide if new updates need
         * to be run right away
         */
        int flushing;

        u64 run_delayed_start;

        /*
         * seq number of delayed refs. We need to know if a backref was being
         * added before the currently processed ref or afterwards.
         */
        u64 seq;

        /*
         * seq_head holds the seq_list entries of all backref walks currently
         * in progress. While such a walk (btrfs_find_all_roots, qgroups) is
         * running, which might take some time, no newer ref must be
         * processed, as it might influence the outcome of the walk.
         */
        struct list_head seq_head;
};

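/*
 * (Editor's note) Drop one reference on a delayed ref node; once the last
 * reference is gone the node is freed. By then it must already have been
 * removed from the rbtree, hence the WARN_ON.
 */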
static inline void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
{
        WARN_ON(atomic_read(&ref->refs) == 0);
        if (atomic_dec_and_test(&ref->refs)) {
                WARN_ON(ref->in_tree);
                kfree(ref);
        }
}

int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
                               struct btrfs_trans_handle *trans,
                               u64 bytenr, u64 num_bytes, u64 parent,
                               u64 ref_root, int level, int action,
                               struct btrfs_delayed_extent_op *extent_op,
                               int for_cow);
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
                               struct btrfs_trans_handle *trans,
                               u64 bytenr, u64 num_bytes,
                               u64 parent, u64 ref_root,
                               u64 owner, u64 offset, int action,
                               struct btrfs_delayed_extent_op *extent_op,
                               int for_cow);
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
                                struct btrfs_trans_handle *trans,
                                u64 bytenr, u64 num_bytes,
                                struct btrfs_delayed_extent_op *extent_op);

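/*
 * Illustrative call (editor's sketch; the variable names are assumptions):
 * queue one new backref for a non-shared tree block, to be applied when
 * the delayed refs are run:
 *
 *        ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr, num_bytes,
 *                                         0, root_objectid, level,
 *                                         BTRFS_ADD_DELAYED_REF, NULL,
 *                                         for_cow);
 *
 * parent == 0 keys the ref on the owning root instead of a shared parent
 * block; extent_op == NULL queues no key/flag update along with it.
 */
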
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr);
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
                           struct btrfs_delayed_ref_head *head);
int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
                           struct list_head *cluster, u64 search_start);

struct seq_list {
        struct list_head list;
        u64 seq;
};

static inline u64 inc_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs)
{
        assert_spin_locked(&delayed_refs->lock);
        ++delayed_refs->seq;
        return delayed_refs->seq;
}

static inline void
btrfs_get_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs,
                      struct seq_list *elem)
{
        assert_spin_locked(&delayed_refs->lock);
        elem->seq = delayed_refs->seq;
        list_add_tail(&elem->list, &delayed_refs->seq_head);
}

static inline void
btrfs_put_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs,
                      struct seq_list *elem)
{
        spin_lock(&delayed_refs->lock);
        list_del(&elem->list);
        spin_unlock(&delayed_refs->lock);
}

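/*
 * Illustrative use of the seq machinery (editor's sketch): a backref walker
 * pins the current seq before walking and releases it when done, so refs
 * added later (with a higher seq) are held back for the duration:
 *
 *        struct seq_list elem;
 *
 *        spin_lock(&delayed_refs->lock);
 *        btrfs_get_delayed_seq(delayed_refs, &elem);
 *        spin_unlock(&delayed_refs->lock);
 *
 *        ... walk the backrefs, e.g. btrfs_find_all_roots() ...
 *
 *        btrfs_put_delayed_seq(delayed_refs, &elem);
 */
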
int btrfs_check_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs,
                            u64 seq);

/*
 * Delayed refs with a ref_seq > 0 must be held back during backref walking.
 * This only applies to items in one of the fs trees. for_cow items never
 * need to be held back, so they won't get a ref_seq number.
 */
static inline int need_ref_seq(int for_cow, u64 rootid)
{
        if (for_cow)
                return 0;

        if (rootid == BTRFS_FS_TREE_OBJECTID)
                return 1;

        if ((s64)rootid >= (s64)BTRFS_FIRST_FREE_OBJECTID)
                return 1;

        return 0;
}

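/*
 * Assumed usage (editor's sketch): the btrfs_add_delayed_*_ref paths call
 * need_ref_seq() and, when it returns 1, tag the new ref via
 * inc_delayed_seq() so that a concurrent backref walk can hold it back:
 *
 *        if (need_ref_seq(for_cow, ref_root))
 *                seq = inc_delayed_seq(delayed_refs);
 *        ref->seq = seq;
 */
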
/*
 * A node might live in a head or a regular ref; this lets you
 * test for the proper type to use.
 */
static inline int btrfs_delayed_ref_is_head(struct btrfs_delayed_ref_node *node)
{
        return node->is_head;
}

/*
 * helper functions to cast a node into its container
 */
static inline struct btrfs_delayed_tree_ref *
btrfs_delayed_node_to_tree_ref(struct btrfs_delayed_ref_node *node)
{
        WARN_ON(btrfs_delayed_ref_is_head(node));
        return container_of(node, struct btrfs_delayed_tree_ref, node);
}

static inline struct btrfs_delayed_data_ref *
btrfs_delayed_node_to_data_ref(struct btrfs_delayed_ref_node *node)
{
        WARN_ON(btrfs_delayed_ref_is_head(node));
        return container_of(node, struct btrfs_delayed_data_ref, node);
}

static inline struct btrfs_delayed_ref_head *
btrfs_delayed_node_to_head(struct btrfs_delayed_ref_node *node)
{
        WARN_ON(!btrfs_delayed_ref_is_head(node));
        return container_of(node, struct btrfs_delayed_ref_head, node);
}
#endif