/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/btrfs.h>

#include "ctree.h"
#include "transaction.h"
#include "disk-io.h"
#include "locking.h"
#include "ulist.h"
#include "backref.h"
#include "extent_io.h"

/* TODO XXX FIXME
 *  - subvol delete -> delete when ref goes to 0? delete limits also?
 *  - reorganize keys
 *  - compressed
 *  - sync
 *  - copy also limits on subvol creation
 *  - limit
 *  - caches for ulists
 *  - performance benchmarks
 *  - check all ioctl parameters
 */

/*
 * one struct for each qgroup, organized in fs_info->qgroup_tree.
 */
struct btrfs_qgroup {
	u64 qgroupid;

	/*
	 * state
	 */
	u64 rfer;	/* referenced */
	u64 rfer_cmpr;	/* referenced compressed */
	u64 excl;	/* exclusive */
	u64 excl_cmpr;	/* exclusive compressed */

	/*
	 * limits
	 */
	u64 lim_flags;	/* which limits are set */
	u64 max_rfer;
	u64 max_excl;
	u64 rsv_rfer;
	u64 rsv_excl;

	/*
	 * reservation tracking
	 */
	u64 reserved;

	/*
	 * lists
	 */
	struct list_head groups;  /* groups this group is member of */
	struct list_head members; /* groups that are members of this group */
	struct list_head dirty;   /* dirty groups */
	struct rb_node node;	  /* tree of qgroups */

	/*
	 * temp variables for accounting operations
	 */
	u64 tag;
	u64 refcnt;
};

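/*
 * Note on the accounting fields above: "referenced" (rfer) counts every
 * byte the qgroup can reach, whether or not other qgroups share it, while
 * "exclusive" (excl) counts only bytes no other qgroup references.  For
 * example, two snapshots that still share all their extents each report
 * rfer equal to the full subvolume size, while excl covers just the few
 * blocks unique to each of them.
 */
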
/*
 * glue structure to represent the relations between qgroups.
 */
struct btrfs_qgroup_list {
	struct list_head next_group;
	struct list_head next_member;
	struct btrfs_qgroup *group;
	struct btrfs_qgroup *member;
};

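/*
 * Each relation allocates exactly one of these and links it into two
 * lists at once: member->groups via next_group, and group->members via
 * next_member (see add_relation_rb() below), so either side of the
 * relation can be walked.  A sketch of walking one side, as done
 * throughout this file:
 *
 *	struct btrfs_qgroup_list *glist;
 *
 *	list_for_each_entry(glist, &member->groups, next_group)
 *		visit(glist->group);
 */
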
static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
		   int init_flags);
static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info);

/* must be called with qgroup_ioctl_lock held */
static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
					   u64 qgroupid)
{
	struct rb_node *n = fs_info->qgroup_tree.rb_node;
	struct btrfs_qgroup *qgroup;

	while (n) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		if (qgroup->qgroupid < qgroupid)
			n = n->rb_left;
		else if (qgroup->qgroupid > qgroupid)
			n = n->rb_right;
		else
			return qgroup;
	}
	return NULL;
}

/* must be called with qgroup_lock held */
static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
					  u64 qgroupid)
{
	struct rb_node **p = &fs_info->qgroup_tree.rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_qgroup *qgroup;

	while (*p) {
		parent = *p;
		qgroup = rb_entry(parent, struct btrfs_qgroup, node);

		if (qgroup->qgroupid < qgroupid)
			p = &(*p)->rb_left;
		else if (qgroup->qgroupid > qgroupid)
			p = &(*p)->rb_right;
		else
			return qgroup;
	}

	qgroup = kzalloc(sizeof(*qgroup), GFP_ATOMIC);
	if (!qgroup)
		return ERR_PTR(-ENOMEM);

	qgroup->qgroupid = qgroupid;
	INIT_LIST_HEAD(&qgroup->groups);
	INIT_LIST_HEAD(&qgroup->members);
	INIT_LIST_HEAD(&qgroup->dirty);

	rb_link_node(&qgroup->node, parent, p);
	rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);

	return qgroup;
}

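/*
 * The GFP_ATOMIC allocations in add_qgroup_rb() and add_relation_rb()
 * are deliberate: both run with the qgroup_lock spinlock held, where a
 * sleeping allocation would not be allowed.
 */
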
static void __del_qgroup_rb(struct btrfs_qgroup *qgroup)
{
	struct btrfs_qgroup_list *list;

	list_del(&qgroup->dirty);
	while (!list_empty(&qgroup->groups)) {
		list = list_first_entry(&qgroup->groups,
					struct btrfs_qgroup_list, next_group);
		list_del(&list->next_group);
		list_del(&list->next_member);
		kfree(list);
	}

	while (!list_empty(&qgroup->members)) {
		list = list_first_entry(&qgroup->members,
					struct btrfs_qgroup_list, next_member);
		list_del(&list->next_group);
		list_del(&list->next_member);
		kfree(list);
	}
	kfree(qgroup);
}

/* must be called with qgroup_lock held */
static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
{
	struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);

	if (!qgroup)
		return -ENOENT;

	rb_erase(&qgroup->node, &fs_info->qgroup_tree);
	__del_qgroup_rb(qgroup);
	return 0;
}

/* must be called with qgroup_lock held */
static int add_relation_rb(struct btrfs_fs_info *fs_info,
			   u64 memberid, u64 parentid)
{
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup_list *list;

	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);
	if (!member || !parent)
		return -ENOENT;

	list = kzalloc(sizeof(*list), GFP_ATOMIC);
	if (!list)
		return -ENOMEM;

	list->group = parent;
	list->member = member;
	list_add_tail(&list->next_group, &member->groups);
	list_add_tail(&list->next_member, &parent->members);

	return 0;
}

/* must be called with qgroup_lock held */
static int del_relation_rb(struct btrfs_fs_info *fs_info,
			   u64 memberid, u64 parentid)
{
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup_list *list;

	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);
	if (!member || !parent)
		return -ENOENT;

	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			list_del(&list->next_group);
			list_del(&list->next_member);
			kfree(list);
			return 0;
		}
	}
	return -ENOENT;
}

/*
 * The full config is read in one go, only called from open_ctree().
 * It doesn't use any locking, as at this point we're still single-threaded.
 */
int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
{
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path = NULL;
	struct extent_buffer *l;
	int slot;
	int ret = 0;
	u64 flags = 0;
	u64 rescan_progress = 0;

	if (!fs_info->quota_enabled)
		return 0;

	fs_info->qgroup_ulist = ulist_alloc(GFP_NOFS);
	if (!fs_info->qgroup_ulist) {
		ret = -ENOMEM;
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	/* default this to quota off, in case no status key is found */
	fs_info->qgroup_flags = 0;

	/*
	 * pass 1: read status, all qgroup infos and limits
	 */
	key.objectid = 0;
	key.type = 0;
	key.offset = 0;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
	if (ret)
		goto out;

	while (1) {
		struct btrfs_qgroup *qgroup;

		slot = path->slots[0];
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
			struct btrfs_qgroup_status_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_status_item);

			if (btrfs_qgroup_status_version(l, ptr) !=
			    BTRFS_QGROUP_STATUS_VERSION) {
				printk(KERN_ERR
				 "btrfs: old qgroup version, quota disabled\n");
				goto out;
			}
			if (btrfs_qgroup_status_generation(l, ptr) !=
			    fs_info->generation) {
				flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
				printk(KERN_ERR
				 "btrfs: qgroup generation mismatch, "
				 "marked as inconsistent\n");
			}
			fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
									  ptr);
			rescan_progress = btrfs_qgroup_status_rescan(l, ptr);
			goto next1;
		}

		if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
		    found_key.type != BTRFS_QGROUP_LIMIT_KEY)
			goto next1;

		qgroup = find_qgroup_rb(fs_info, found_key.offset);
		if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
		    (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
			printk(KERN_ERR "btrfs: inconsistent qgroup config\n");
			flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		}
		if (!qgroup) {
			qgroup = add_qgroup_rb(fs_info, found_key.offset);
			if (IS_ERR(qgroup)) {
				ret = PTR_ERR(qgroup);
				goto out;
			}
		}
		switch (found_key.type) {
		case BTRFS_QGROUP_INFO_KEY: {
			struct btrfs_qgroup_info_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_info_item);
			qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
			qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
			qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
			qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
			/* generation currently unused */
			break;
		}
		case BTRFS_QGROUP_LIMIT_KEY: {
			struct btrfs_qgroup_limit_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_limit_item);
			qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
			qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
			qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
			qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
			qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
			break;
		}
		}
next1:
		ret = btrfs_next_item(quota_root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;
	}
	btrfs_release_path(path);

	/*
	 * pass 2: read all qgroup relations
	 */
	key.objectid = 0;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = 0;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
	if (ret)
		goto out;
	while (1) {
		slot = path->slots[0];
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
			goto next2;

		if (found_key.objectid > found_key.offset) {
			/* parent <- member, not needed to build config */
			/* FIXME should we omit the key completely? */
			goto next2;
		}

		ret = add_relation_rb(fs_info, found_key.objectid,
				      found_key.offset);
		if (ret == -ENOENT) {
			printk(KERN_WARNING
				"btrfs: orphan qgroup relation 0x%llx->0x%llx\n",
				(unsigned long long)found_key.objectid,
				(unsigned long long)found_key.offset);
			ret = 0;	/* ignore the error */
		}
		if (ret)
			goto out;
next2:
		ret = btrfs_next_item(quota_root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;
	}
out:
	fs_info->qgroup_flags |= flags;
	if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)) {
		fs_info->quota_enabled = 0;
		fs_info->pending_quota_state = 0;
	} else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
		   ret >= 0) {
		ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
	}
	btrfs_free_path(path);

	if (ret < 0) {
		ulist_free(fs_info->qgroup_ulist);
		fs_info->qgroup_ulist = NULL;
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
	}

	return ret < 0 ? ret : 0;
}

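/*
 * On-disk key layout, as consumed by the two passes above: the status,
 * info and limit items all live in the quota tree with objectid 0 and
 * (for info/limit) the qgroup id in the key offset.  Relation items
 * instead use (src qgroupid, BTRFS_QGROUP_RELATION_KEY, dst qgroupid)
 * and are written once in each direction, which is why pass 2 skips
 * keys whose objectid is greater than their offset.
 */
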
/*
 * Called from close_ctree() or open_ctree(), both single-threaded paths
 * (and from quota disable). Cleans up the in-memory structures. No
 * locking is needed on the single-threaded paths.
 */
void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
{
	struct rb_node *n;
	struct btrfs_qgroup *qgroup;

	while ((n = rb_first(&fs_info->qgroup_tree))) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		rb_erase(n, &fs_info->qgroup_tree);
		__del_qgroup_rb(qgroup);
	}
	/*
	 * We call btrfs_free_qgroup_config() both when unmounting the
	 * filesystem and when disabling quota, so set qgroup_ulist
	 * to NULL here to avoid a double free.
	 */
	ulist_free(fs_info->qgroup_ulist);
	fs_info->qgroup_ulist = NULL;
}

static int add_qgroup_relation_item(struct btrfs_trans_handle *trans,
				    struct btrfs_root *quota_root,
				    u64 src, u64 dst)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = src;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = dst;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);

	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_free_path(path);
	return ret;
}

static int del_qgroup_relation_item(struct btrfs_trans_handle *trans,
				    struct btrfs_root *quota_root,
				    u64 src, u64 dst)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = src;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = dst;

	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);
out:
	btrfs_free_path(path);
	return ret;
}

static int add_qgroup_item(struct btrfs_trans_handle *trans,
			   struct btrfs_root *quota_root, u64 qgroupid)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_qgroup_info_item *qgroup_info;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_info));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);

	btrfs_mark_buffer_dirty(leaf);

	btrfs_release_path(path);

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_limit));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);

	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

static int del_qgroup_item(struct btrfs_trans_handle *trans,
			   struct btrfs_root *quota_root, u64 qgroupid)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);
	if (ret)
		goto out;

	btrfs_release_path(path);

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);

out:
	btrfs_free_path(path);
	return ret;
}

static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root, u64 qgroupid,
				    u64 flags, u64 max_rfer, u64 max_excl,
				    u64 rsv_rfer, u64 rsv_excl)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_LIMIT_KEY;
	key.offset = qgroupid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	qgroup_limit = btrfs_item_ptr(l, path->slots[0],
				      struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(l, qgroup_limit, flags);
	btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, max_rfer);
	btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, max_excl);
	btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, rsv_rfer);
	btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, rsv_excl);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}

static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_qgroup *qgroup)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_info_item *qgroup_info;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroup->qgroupid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	qgroup_info = btrfs_item_ptr(l, path->slots[0],
				     struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
	btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
	btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
	btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}

static int update_qgroup_status_item(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info,
				     struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_status_item *ptr;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags);
	btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
	btrfs_set_qgroup_status_rescan(l, ptr,
				fs_info->qgroup_rescan_progress.objectid);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * called with qgroup_lock held
 */
static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *leaf = NULL;
	int ret;
	int nr = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;

	key.objectid = 0;
	key.offset = 0;
	key.type = 0;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			goto out;
		leaf = path->nodes[0];
		nr = btrfs_header_nritems(leaf);
		if (!nr)
			break;
		/*
		 * delete the leaves one by one, since the whole tree is
		 * going to be deleted anyway.
		 */
		path->slots[0] = 0;
		ret = btrfs_del_items(trans, root, path, 0, nr);
		if (ret)
			goto out;

		btrfs_release_path(path);
	}
	ret = 0;
out:
	root->fs_info->pending_quota_state = 0;
	btrfs_free_path(path);
	return ret;
}

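/*
 * Enabling quota, below, creates the quota tree itself, writes the
 * status item, and then walks all BTRFS_ROOT_REF_KEY items in the tree
 * root so that every existing subvolume (plus the top-level fs tree)
 * starts out with its own qgroup item.
 */
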
int btrfs_quota_enable(struct btrfs_trans_handle *trans,
		       struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *quota_root;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_path *path = NULL;
	struct btrfs_qgroup_status_item *ptr;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_qgroup *qgroup = NULL;
	int ret = 0;
	int slot;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (fs_info->quota_root) {
		fs_info->pending_quota_state = 1;
		goto out;
	}

	fs_info->qgroup_ulist = ulist_alloc(GFP_NOFS);
	if (!fs_info->qgroup_ulist) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * initially create the quota tree
	 */
	quota_root = btrfs_create_tree(trans, fs_info,
				       BTRFS_QUOTA_TREE_OBJECTID);
	if (IS_ERR(quota_root)) {
		ret = PTR_ERR(quota_root);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out_free_root;
	}

	key.objectid = 0;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	key.offset = 0;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*ptr));
	if (ret)
		goto out_free_path;

	leaf = path->nodes[0];
	ptr = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
	btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
	fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON |
				BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags);
	btrfs_set_qgroup_status_rescan(leaf, ptr, 0);

	btrfs_mark_buffer_dirty(leaf);

	key.objectid = 0;
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = 0;

	btrfs_release_path(path);
	ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
	if (ret > 0)
		goto out_add_root;
	if (ret < 0)
		goto out_free_path;


	while (1) {
		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.type == BTRFS_ROOT_REF_KEY) {
			ret = add_qgroup_item(trans, quota_root,
					      found_key.offset);
			if (ret)
				goto out_free_path;

			qgroup = add_qgroup_rb(fs_info, found_key.offset);
			if (IS_ERR(qgroup)) {
				ret = PTR_ERR(qgroup);
				goto out_free_path;
			}
		}
		ret = btrfs_next_item(tree_root, path);
		if (ret < 0)
			goto out_free_path;
		if (ret)
			break;
	}

out_add_root:
	btrfs_release_path(path);
	ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
	if (ret)
		goto out_free_path;

	qgroup = add_qgroup_rb(fs_info, BTRFS_FS_TREE_OBJECTID);
	if (IS_ERR(qgroup)) {
		ret = PTR_ERR(qgroup);
		goto out_free_path;
	}
	spin_lock(&fs_info->qgroup_lock);
	fs_info->quota_root = quota_root;
	fs_info->pending_quota_state = 1;
	spin_unlock(&fs_info->qgroup_lock);
out_free_path:
	btrfs_free_path(path);
out_free_root:
	if (ret) {
		free_extent_buffer(quota_root->node);
		free_extent_buffer(quota_root->commit_root);
		kfree(quota_root);
	}
out:
	if (ret) {
		ulist_free(fs_info->qgroup_ulist);
		fs_info->qgroup_ulist = NULL;
	}
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

int btrfs_quota_disable(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *quota_root;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root)
		goto out;
	spin_lock(&fs_info->qgroup_lock);
	fs_info->quota_enabled = 0;
	fs_info->pending_quota_state = 0;
	quota_root = fs_info->quota_root;
	fs_info->quota_root = NULL;
	btrfs_free_qgroup_config(fs_info);
	spin_unlock(&fs_info->qgroup_lock);

	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	ret = btrfs_clean_quota_tree(trans, quota_root);
	if (ret)
		goto out;

	ret = btrfs_del_root(trans, tree_root, &quota_root->root_key);
	if (ret)
		goto out;

	list_del(&quota_root->dirty_list);

	btrfs_tree_lock(quota_root->node);
	clean_tree_block(trans, tree_root, quota_root->node);
	btrfs_tree_unlock(quota_root->node);
	btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);

	free_extent_buffer(quota_root->node);
	free_extent_buffer(quota_root->commit_root);
	kfree(quota_root);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

static void qgroup_dirty(struct btrfs_fs_info *fs_info,
			 struct btrfs_qgroup *qgroup)
{
	if (list_empty(&qgroup->dirty))
		list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
}

int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info, u64 src, u64 dst)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup *member;
	struct btrfs_qgroup_list *list;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}
	member = find_qgroup_rb(fs_info, src);
	parent = find_qgroup_rb(fs_info, dst);
	if (!member || !parent) {
		ret = -EINVAL;
		goto out;
	}

	/* first check whether such a qgroup relation exists already */
	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			ret = -EEXIST;
			goto out;
		}
	}

	ret = add_qgroup_relation_item(trans, quota_root, src, dst);
	if (ret)
		goto out;

	ret = add_qgroup_relation_item(trans, quota_root, dst, src);
	if (ret) {
		del_qgroup_relation_item(trans, quota_root, src, dst);
		goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	ret = add_relation_rb(quota_root->fs_info, src, dst);
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info, u64 src, u64 dst)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup *member;
	struct btrfs_qgroup_list *list;
	int ret = 0;
	int err;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	member = find_qgroup_rb(fs_info, src);
	parent = find_qgroup_rb(fs_info, dst);
	if (!member || !parent) {
		ret = -EINVAL;
		goto out;
	}

	/* first check whether such a qgroup relation exists at all */
	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent)
			goto exist;
	}
	ret = -ENOENT;
	goto out;
exist:
	ret = del_qgroup_relation_item(trans, quota_root, src, dst);
	err = del_qgroup_relation_item(trans, quota_root, dst, src);
	if (err && !ret)
		ret = err;

	spin_lock(&fs_info->qgroup_lock);
	del_relation_rb(fs_info, src, dst);
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

int btrfs_create_qgroup(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info, u64 qgroupid, char *name)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}
	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (qgroup) {
		ret = -EEXIST;
		goto out;
	}

	ret = add_qgroup_item(trans, quota_root, qgroupid);
	if (ret)
		goto out;

	spin_lock(&fs_info->qgroup_lock);
	qgroup = add_qgroup_rb(fs_info, qgroupid);
	spin_unlock(&fs_info->qgroup_lock);

	if (IS_ERR(qgroup))
		ret = PTR_ERR(qgroup);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info, u64 qgroupid)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup) {
		ret = -ENOENT;
		goto out;
	} else {
		/* check if there are no relations to this qgroup */
		if (!list_empty(&qgroup->groups) ||
		    !list_empty(&qgroup->members)) {
			ret = -EBUSY;
			goto out;
		}
	}
	ret = del_qgroup_item(trans, quota_root, qgroupid);

	spin_lock(&fs_info->qgroup_lock);
	del_qgroup_rb(quota_root->fs_info, qgroupid);
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

int btrfs_limit_qgroup(struct btrfs_trans_handle *trans,
		       struct btrfs_fs_info *fs_info, u64 qgroupid,
		       struct btrfs_qgroup_limit *limit)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup) {
		ret = -ENOENT;
		goto out;
	}
	ret = update_qgroup_limit_item(trans, quota_root, qgroupid,
				       limit->flags, limit->max_rfer,
				       limit->max_excl, limit->rsv_rfer,
				       limit->rsv_excl);
	if (ret) {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		printk(KERN_INFO "unable to update quota limit for %llu\n",
		       (unsigned long long)qgroupid);
	}

	spin_lock(&fs_info->qgroup_lock);
	qgroup->lim_flags = limit->flags;
	qgroup->max_rfer = limit->max_rfer;
	qgroup->max_excl = limit->max_excl;
	qgroup->rsv_rfer = limit->rsv_rfer;
	qgroup->rsv_excl = limit->rsv_excl;
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

/*
 * btrfs_qgroup_record_ref is called when a ref is added or deleted. It
 * puts the modification into a list that's later used by
 * btrfs_end_transaction to pass the recorded modifications on to
 * btrfs_qgroup_account_ref.
 */
int btrfs_qgroup_record_ref(struct btrfs_trans_handle *trans,
			    struct btrfs_delayed_ref_node *node,
			    struct btrfs_delayed_extent_op *extent_op)
{
	struct qgroup_update *u;

	BUG_ON(!trans->delayed_ref_elem.seq);
	u = kmalloc(sizeof(*u), GFP_NOFS);
	if (!u)
		return -ENOMEM;

	u->node = node;
	u->extent_op = extent_op;
	list_add_tail(&u->list, &trans->qgroup_ref_list);

	return 0;
}

1205
Jan Schmidt46b665c2013-04-25 16:04:50 +00001206static int qgroup_account_ref_step1(struct btrfs_fs_info *fs_info,
1207 struct ulist *roots, struct ulist *tmp,
1208 u64 seq)
1209{
1210 struct ulist_node *unode;
1211 struct ulist_iterator uiter;
1212 struct ulist_node *tmp_unode;
1213 struct ulist_iterator tmp_uiter;
1214 struct btrfs_qgroup *qg;
1215 int ret;
1216
1217 ULIST_ITER_INIT(&uiter);
1218 while ((unode = ulist_next(roots, &uiter))) {
1219 qg = find_qgroup_rb(fs_info, unode->val);
1220 if (!qg)
1221 continue;
1222
1223 ulist_reinit(tmp);
1224 /* XXX id not needed */
1225 ret = ulist_add(tmp, qg->qgroupid,
1226 (u64)(uintptr_t)qg, GFP_ATOMIC);
1227 if (ret < 0)
1228 return ret;
1229 ULIST_ITER_INIT(&tmp_uiter);
1230 while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
1231 struct btrfs_qgroup_list *glist;
1232
1233 qg = (struct btrfs_qgroup *)(uintptr_t)tmp_unode->aux;
1234 if (qg->refcnt < seq)
1235 qg->refcnt = seq + 1;
1236 else
1237 ++qg->refcnt;
1238
1239 list_for_each_entry(glist, &qg->groups, next_group) {
1240 ret = ulist_add(tmp, glist->group->qgroupid,
1241 (u64)(uintptr_t)glist->group,
1242 GFP_ATOMIC);
1243 if (ret < 0)
1244 return ret;
1245 }
1246 }
1247 }
1248
1249 return 0;
1250}
1251
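/*
 * Note on the refcnt scheme used by the three steps: the caller samples
 * seq from fs_info->qgroup_seq once per accounting run and afterwards
 * advances that counter past the largest refcnt the run can produce, so
 * comparing qg->refcnt (or qg->tag) against seq distinguishes "visited
 * in this run" from stale values without ever resetting the fields.
 */
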
static int qgroup_account_ref_step2(struct btrfs_fs_info *fs_info,
				    struct ulist *roots, struct ulist *tmp,
				    u64 seq, int sgn, u64 num_bytes,
				    struct btrfs_qgroup *qgroup)
{
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct btrfs_qgroup *qg;
	struct btrfs_qgroup_list *glist;
	int ret;

	ulist_reinit(tmp);
	ret = ulist_add(tmp, qgroup->qgroupid, (uintptr_t)qgroup, GFP_ATOMIC);
	if (ret < 0)
		return ret;

	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(tmp, &uiter))) {
		qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux;
		if (qg->refcnt < seq) {
			/* not visited by step 1 */
			qg->rfer += sgn * num_bytes;
			qg->rfer_cmpr += sgn * num_bytes;
			if (roots->nnodes == 0) {
				qg->excl += sgn * num_bytes;
				qg->excl_cmpr += sgn * num_bytes;
			}
			qgroup_dirty(fs_info, qg);
		}
		WARN_ON(qg->tag >= seq);
		qg->tag = seq;

		list_for_each_entry(glist, &qg->groups, next_group) {
			ret = ulist_add(tmp, glist->group->qgroupid,
					(uintptr_t)glist->group, GFP_ATOMIC);
			if (ret < 0)
				return ret;
		}
	}

	return 0;
}

static int qgroup_account_ref_step3(struct btrfs_fs_info *fs_info,
				    struct ulist *roots, struct ulist *tmp,
				    u64 seq, int sgn, u64 num_bytes)
{
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct btrfs_qgroup *qg;
	struct ulist_node *tmp_unode;
	struct ulist_iterator tmp_uiter;
	int ret;

	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(roots, &uiter))) {
		qg = find_qgroup_rb(fs_info, unode->val);
		if (!qg)
			continue;

		ulist_reinit(tmp);
		ret = ulist_add(tmp, qg->qgroupid, (uintptr_t)qg, GFP_ATOMIC);
		if (ret < 0)
			return ret;

		ULIST_ITER_INIT(&tmp_uiter);
		while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
			struct btrfs_qgroup_list *glist;

			qg = (struct btrfs_qgroup *)(uintptr_t)tmp_unode->aux;
			if (qg->tag == seq)
				continue;

			if (qg->refcnt - seq == roots->nnodes) {
				qg->excl -= sgn * num_bytes;
				qg->excl_cmpr -= sgn * num_bytes;
				qgroup_dirty(fs_info, qg);
			}

			list_for_each_entry(glist, &qg->groups, next_group) {
				ret = ulist_add(tmp, glist->group->qgroupid,
						(uintptr_t)glist->group,
						GFP_ATOMIC);
				if (ret < 0)
					return ret;
			}
		}
	}

	return 0;
}

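/*
 * Worked example for the three steps: extent X is referenced by
 * subvolumes A and B, each in its own qgroup with no shared parents,
 * and B drops its reference (sgn == -1).  The roots ulist then contains
 * only A.  Step 1 bumps the refcnt of A's qgroup.  Step 2 starts from
 * B's qgroup, which step 1 did not visit, so B's rfer drops by
 * num_bytes (excl is untouched because roots is not empty), and B gets
 * tagged.  Step 3 revisits A's qgroup: it is untagged and its refcnt
 * delta equals roots->nnodes, so excl -= sgn * num_bytes, i.e. A gains
 * num_bytes of exclusive space now that it is the sole owner.
 */
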
/*
 * btrfs_qgroup_account_ref is called for every ref that is added to or deleted
 * from the fs. First, all roots referencing the extent are searched, and
 * then the space is accounted accordingly to the different roots. The
 * accounting algorithm works in 3 steps documented inline.
 */
int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info,
			     struct btrfs_delayed_ref_node *node,
			     struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_key ins;
	struct btrfs_root *quota_root;
	u64 ref_root;
	struct btrfs_qgroup *qgroup;
	struct ulist *roots = NULL;
	u64 seq;
	int ret = 0;
	int sgn;

	if (!fs_info->quota_enabled)
		return 0;

	BUG_ON(!fs_info->quota_root);

	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    node->type == BTRFS_SHARED_BLOCK_REF_KEY) {
		struct btrfs_delayed_tree_ref *ref;
		ref = btrfs_delayed_node_to_tree_ref(node);
		ref_root = ref->root;
	} else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
		   node->type == BTRFS_SHARED_DATA_REF_KEY) {
		struct btrfs_delayed_data_ref *ref;
		ref = btrfs_delayed_node_to_data_ref(node);
		ref_root = ref->root;
	} else {
		BUG();
	}

	if (!is_fstree(ref_root)) {
		/*
		 * non-fs-trees are not being accounted
		 */
		return 0;
	}

	switch (node->action) {
	case BTRFS_ADD_DELAYED_REF:
	case BTRFS_ADD_DELAYED_EXTENT:
		sgn = 1;
		seq = btrfs_tree_mod_seq_prev(node->seq);
		break;
	case BTRFS_DROP_DELAYED_REF:
		sgn = -1;
		seq = node->seq;
		break;
	case BTRFS_UPDATE_DELAYED_HEAD:
		return 0;
	default:
		BUG();
	}

	mutex_lock(&fs_info->qgroup_rescan_lock);
	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
		if (fs_info->qgroup_rescan_progress.objectid <= node->bytenr) {
			mutex_unlock(&fs_info->qgroup_rescan_lock);
			return 0;
		}
	}
	mutex_unlock(&fs_info->qgroup_rescan_lock);

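	/*
	 * If a rescan is in progress and has not yet passed this extent,
	 * the rescan worker will account it when it gets there; bailing
	 * out above avoids counting the extent twice.
	 */
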
	/*
	 * the delayed ref sequence number we pass depends on the direction of
	 * the operation. for add operations, we pass
	 * btrfs_tree_mod_seq_prev(node->seq) to skip
	 * the delayed ref's current sequence number, because we need the state
	 * of the tree before the add operation. for delete operations, we pass
	 * (node->seq) to include the delayed ref's current sequence number,
	 * because we need the state of the tree after the delete operation.
	 */
	ret = btrfs_find_all_roots(trans, fs_info, node->bytenr, seq, &roots);
	if (ret < 0)
		return ret;

	spin_lock(&fs_info->qgroup_lock);

	quota_root = fs_info->quota_root;
	if (!quota_root)
		goto unlock;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto unlock;

	/*
	 * step 1: for each old ref, visit all nodes once and inc refcnt
	 */
	ulist_reinit(fs_info->qgroup_ulist);
	seq = fs_info->qgroup_seq;
	fs_info->qgroup_seq += roots->nnodes + 1; /* max refcnt */

	ret = qgroup_account_ref_step1(fs_info, roots, fs_info->qgroup_ulist,
				       seq);
	if (ret)
		goto unlock;

	/*
	 * step 2: walk from the new root
	 */
	ret = qgroup_account_ref_step2(fs_info, roots, fs_info->qgroup_ulist,
				       seq, sgn, node->num_bytes, qgroup);
	if (ret)
		goto unlock;

	/*
	 * step 3: walk again from old refs
	 */
	ret = qgroup_account_ref_step3(fs_info, roots, fs_info->qgroup_ulist,
				       seq, sgn, node->num_bytes);
	if (ret)
		goto unlock;

unlock:
	spin_unlock(&fs_info->qgroup_lock);
	ulist_free(roots);

	return ret;
}

/*
 * called from commit_transaction. Writes all changed qgroups to disk.
 */
int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
		      struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *quota_root = fs_info->quota_root;
	int ret = 0;
	int start_rescan_worker = 0;

	if (!quota_root)
		goto out;

	if (!fs_info->quota_enabled && fs_info->pending_quota_state)
		start_rescan_worker = 1;

	fs_info->quota_enabled = fs_info->pending_quota_state;

	spin_lock(&fs_info->qgroup_lock);
	while (!list_empty(&fs_info->dirty_qgroups)) {
		struct btrfs_qgroup *qgroup;
		qgroup = list_first_entry(&fs_info->dirty_qgroups,
					  struct btrfs_qgroup, dirty);
		list_del_init(&qgroup->dirty);
		spin_unlock(&fs_info->qgroup_lock);
		ret = update_qgroup_info_item(trans, quota_root, qgroup);
		if (ret)
			fs_info->qgroup_flags |=
					BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		spin_lock(&fs_info->qgroup_lock);
	}
	if (fs_info->quota_enabled)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
	else
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
	spin_unlock(&fs_info->qgroup_lock);

	ret = update_qgroup_status_item(trans, fs_info, quota_root);
	if (ret)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;

	if (!ret && start_rescan_worker) {
		ret = qgroup_rescan_init(fs_info, 0, 1);
		if (!ret) {
			qgroup_rescan_zero_tracking(fs_info);
			btrfs_queue_worker(&fs_info->qgroup_rescan_workers,
					   &fs_info->qgroup_rescan_work);
		}
		ret = 0;
	}

out:

	return ret;
}

/*
 * copy the accounting information between qgroups. This is necessary when a
 * snapshot or a subvolume is created.
 */
int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid,
			 struct btrfs_qgroup_inherit *inherit)
{
	int ret = 0;
	int i;
	u64 *i_qgroups;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_qgroup *srcgroup;
	struct btrfs_qgroup *dstgroup;
	u32 level_size = 0;
	u64 nums;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_enabled)
		goto out;

	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	if (inherit) {
		i_qgroups = (u64 *)(inherit + 1);
		nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
		       2 * inherit->num_excl_copies;
		for (i = 0; i < nums; ++i) {
			srcgroup = find_qgroup_rb(fs_info, *i_qgroups);
			if (!srcgroup) {
				ret = -EINVAL;
				goto out;
			}
			++i_qgroups;
		}
	}

	/*
	 * create a tracking group for the subvol itself
	 */
	ret = add_qgroup_item(trans, quota_root, objectid);
	if (ret)
		goto out;

	if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
		ret = update_qgroup_limit_item(trans, quota_root, objectid,
					       inherit->lim.flags,
					       inherit->lim.max_rfer,
					       inherit->lim.max_excl,
					       inherit->lim.rsv_rfer,
					       inherit->lim.rsv_excl);
		if (ret)
			goto out;
	}

	if (srcid) {
		struct btrfs_root *srcroot;
		struct btrfs_key srckey;
		int srcroot_level;

		srckey.objectid = srcid;
		srckey.type = BTRFS_ROOT_ITEM_KEY;
		srckey.offset = (u64)-1;
		srcroot = btrfs_read_fs_root_no_name(fs_info, &srckey);
		if (IS_ERR(srcroot)) {
			ret = PTR_ERR(srcroot);
			goto out;
		}

		rcu_read_lock();
		srcroot_level = btrfs_header_level(srcroot->node);
		level_size = btrfs_level_size(srcroot, srcroot_level);
		rcu_read_unlock();
	}

	/*
	 * add qgroup to all inherited groups
	 */
	if (inherit) {
		i_qgroups = (u64 *)(inherit + 1);
		for (i = 0; i < inherit->num_qgroups; ++i) {
			ret = add_qgroup_relation_item(trans, quota_root,
						       objectid, *i_qgroups);
			if (ret)
				goto out;
			ret = add_qgroup_relation_item(trans, quota_root,
						       *i_qgroups, objectid);
			if (ret)
				goto out;
			++i_qgroups;
		}
	}


	spin_lock(&fs_info->qgroup_lock);

	dstgroup = add_qgroup_rb(fs_info, objectid);
	if (IS_ERR(dstgroup)) {
		ret = PTR_ERR(dstgroup);
		goto unlock;
	}

	if (srcid) {
		srcgroup = find_qgroup_rb(fs_info, srcid);
		if (!srcgroup)
			goto unlock;
		dstgroup->rfer = srcgroup->rfer - level_size;
		dstgroup->rfer_cmpr = srcgroup->rfer_cmpr - level_size;
		srcgroup->excl = level_size;
		srcgroup->excl_cmpr = level_size;
		qgroup_dirty(fs_info, dstgroup);
		qgroup_dirty(fs_info, srcgroup);
	}

	if (!inherit)
		goto unlock;

	i_qgroups = (u64 *)(inherit + 1);
	for (i = 0; i < inherit->num_qgroups; ++i) {
		ret = add_relation_rb(quota_root->fs_info, objectid,
				      *i_qgroups);
		if (ret)
			goto unlock;
		++i_qgroups;
	}

	for (i = 0; i < inherit->num_ref_copies; ++i) {
		struct btrfs_qgroup *src;
		struct btrfs_qgroup *dst;

		src = find_qgroup_rb(fs_info, i_qgroups[0]);
		dst = find_qgroup_rb(fs_info, i_qgroups[1]);

		if (!src || !dst) {
			ret = -EINVAL;
			goto unlock;
		}

		dst->rfer = src->rfer - level_size;
		dst->rfer_cmpr = src->rfer_cmpr - level_size;
		i_qgroups += 2;
	}
	for (i = 0; i < inherit->num_excl_copies; ++i) {
		struct btrfs_qgroup *src;
		struct btrfs_qgroup *dst;

		src = find_qgroup_rb(fs_info, i_qgroups[0]);
		dst = find_qgroup_rb(fs_info, i_qgroups[1]);

		if (!src || !dst) {
			ret = -EINVAL;
			goto unlock;
		}

		dst->excl = src->excl + level_size;
		dst->excl_cmpr = src->excl_cmpr + level_size;
		i_qgroups += 2;
	}

unlock:
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

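/*
 * A note on level_size above (an interpretation, not from the original
 * comments): it is the byte size of one tree node at the source root's
 * level.  Snapshot creation copies the source's root node, so the new
 * subvolume owns one node's worth of space that is not shared with the
 * source; the rfer/excl adjustments by level_size account for exactly
 * that block.
 */
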
1702/*
1703 * reserve some space for a qgroup and all its parents. The reservation takes
1704 * place with start_transaction or dealloc_reserve, similar to ENOSPC
1705 * accounting. If not enough space is available, EDQUOT is returned.
1706 * We assume that the requested space is new for all qgroups.
1707 */
int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 ref_root = root->root_key.objectid;
	int ret = 0;
	struct ulist_node *unode;
	struct ulist_iterator uiter;

	if (!is_fstree(ref_root))
		return 0;

	if (num_bytes == 0)
		return 0;

	spin_lock(&fs_info->qgroup_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root)
		goto out;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	/*
	 * in a first step, we check for all affected qgroups whether any
	 * limit would be exceeded
	 */
	ulist_reinit(fs_info->qgroup_ulist);
	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
			(uintptr_t)qgroup, GFP_ATOMIC);
	if (ret < 0)
		goto out;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;
		struct btrfs_qgroup_list *glist;

		qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux;

		if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
		    qg->reserved + (s64)qg->rfer + num_bytes >
		    qg->max_rfer) {
			ret = -EDQUOT;
			goto out;
		}

		if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
		    qg->reserved + (s64)qg->excl + num_bytes >
		    qg->max_excl) {
			ret = -EDQUOT;
			goto out;
		}

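		/*
		 * Also add all parents to the ulist; ulist_add ignores
		 * duplicates, so the walk visits every ancestor exactly
		 * once and terminates even when groups share parents.
		 */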
		list_for_each_entry(glist, &qg->groups, next_group) {
			ret = ulist_add(fs_info->qgroup_ulist,
					glist->group->qgroupid,
					(uintptr_t)glist->group, GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}
	ret = 0;
	/*
	 * no limits exceeded, now record the reservation into all qgroups
	 */
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;

		qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux;

		qg->reserved += num_bytes;
	}

out:
	spin_unlock(&fs_info->qgroup_lock);
	return ret;
}

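/*
 * undo a reservation taken with btrfs_qgroup_reserve. Walks the same set of
 * qgroups (the subvolume's own group plus all its ancestors) and subtracts
 * num_bytes from each group's reserved count; the caller is expected to pass
 * the same byte count it originally reserved.
 */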
void btrfs_qgroup_free(struct btrfs_root *root, u64 num_bytes)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	u64 ref_root = root->root_key.objectid;
	int ret = 0;

	if (!is_fstree(ref_root))
		return;

	if (num_bytes == 0)
		return;

	spin_lock(&fs_info->qgroup_lock);

	quota_root = fs_info->quota_root;
	if (!quota_root)
		goto out;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	ulist_reinit(fs_info->qgroup_ulist);
	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
			(uintptr_t)qgroup, GFP_ATOMIC);
	if (ret < 0)
		goto out;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;
		struct btrfs_qgroup_list *glist;

		qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux;

		qg->reserved -= num_bytes;

		list_for_each_entry(glist, &qg->groups, next_group) {
			ret = ulist_add(fs_info->qgroup_ulist,
					glist->group->qgroupid,
					(uintptr_t)glist->group, GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}

out:
	spin_unlock(&fs_info->qgroup_lock);
}

void assert_qgroups_uptodate(struct btrfs_trans_handle *trans)
{
	if (list_empty(&trans->qgroup_ref_list) && !trans->delayed_ref_elem.seq)
		return;
	pr_err("btrfs: qgroups not uptodate in trans handle %p: list is%s empty, seq is %#x.%x\n",
		trans, list_empty(&trans->qgroup_ref_list) ? "" : " not",
		(u32)(trans->delayed_ref_elem.seq >> 32),
		(u32)trans->delayed_ref_elem.seq);
	BUG();
}

/*
 * returns < 0 on error, 0 when more leaves are to be scanned.
 * returns 1 when done, 2 when done and FLAG_INCONSISTENT was cleared.
 */
static int
qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
		   struct btrfs_trans_handle *trans, struct ulist *tmp,
		   struct extent_buffer *scratch_leaf)
{
	struct btrfs_key found;
	struct ulist *roots = NULL;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct seq_list tree_mod_seq_elem = {};
	u64 seq;
	int slot;
	int ret;

	path->leave_spinning = 1;
	mutex_lock(&fs_info->qgroup_rescan_lock);
	ret = btrfs_search_slot_for_read(fs_info->extent_root,
					 &fs_info->qgroup_rescan_progress,
					 path, 1, 0);

	pr_debug("current progress key (%llu %u %llu), search_slot ret %d\n",
		 (unsigned long long)fs_info->qgroup_rescan_progress.objectid,
		 fs_info->qgroup_rescan_progress.type,
		 (unsigned long long)fs_info->qgroup_rescan_progress.offset,
		 ret);

	if (ret) {
		/*
		 * The rescan is about to end, we will not be scanning any
		 * further blocks. We cannot unset the RESCAN flag here, because
		 * we want to commit the transaction if everything went well.
		 * To make the live accounting work in this phase, we set our
		 * scan progress pointer such that every real extent objectid
		 * will be smaller.
		 */
		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
		btrfs_release_path(path);
		mutex_unlock(&fs_info->qgroup_rescan_lock);
		return ret;
	}

	btrfs_item_key_to_cpu(path->nodes[0], &found,
			      btrfs_header_nritems(path->nodes[0]) - 1);
	fs_info->qgroup_rescan_progress.objectid = found.objectid + 1;

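	/*
	 * Copy the whole leaf into scratch_leaf so the path and the rescan
	 * mutex can be dropped while its items are accounted; the tree mod
	 * seq taken here keeps btrfs_find_all_roots consistent with this
	 * point in time.
	 */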
	btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
	memcpy(scratch_leaf, path->nodes[0], sizeof(*scratch_leaf));
	slot = path->slots[0];
	btrfs_release_path(path);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

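	/*
	 * For every extent item in the copied leaf, find all roots that
	 * currently reference the extent and credit it to each root's qgroup
	 * and to all ancestors of that qgroup.
	 */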
	for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) {
		btrfs_item_key_to_cpu(scratch_leaf, &found, slot);
		if (found.type != BTRFS_EXTENT_ITEM_KEY)
			continue;
		ret = btrfs_find_all_roots(trans, fs_info, found.objectid,
					   tree_mod_seq_elem.seq, &roots);
		if (ret < 0)
			goto out;
		spin_lock(&fs_info->qgroup_lock);
		seq = fs_info->qgroup_seq;
		fs_info->qgroup_seq += roots->nnodes + 1; /* max refcnt */

		ret = qgroup_account_ref_step1(fs_info, roots, tmp, seq);
		if (ret) {
			spin_unlock(&fs_info->qgroup_lock);
			ulist_free(roots);
			goto out;
		}

		/*
		 * step2 of btrfs_qgroup_account_ref works from a single root,
		 * we're doing all at once here.
		 */
		ulist_reinit(tmp);
		ULIST_ITER_INIT(&uiter);
		while ((unode = ulist_next(roots, &uiter))) {
			struct btrfs_qgroup *qg;

			qg = find_qgroup_rb(fs_info, unode->val);
			if (!qg)
				continue;

			ret = ulist_add(tmp, qg->qgroupid, (uintptr_t)qg,
					GFP_ATOMIC);
			if (ret < 0) {
				spin_unlock(&fs_info->qgroup_lock);
				ulist_free(roots);
				goto out;
			}
		}

		/* this loop is similar to step 2 of btrfs_qgroup_account_ref */
		ULIST_ITER_INIT(&uiter);
		while ((unode = ulist_next(tmp, &uiter))) {
			struct btrfs_qgroup *qg;
			struct btrfs_qgroup_list *glist;

			qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux;
			qg->rfer += found.offset;
			qg->rfer_cmpr += found.offset;
			WARN_ON(qg->tag >= seq);
			if (qg->refcnt - seq == roots->nnodes) {
				qg->excl += found.offset;
				qg->excl_cmpr += found.offset;
			}
			qgroup_dirty(fs_info, qg);

			list_for_each_entry(glist, &qg->groups, next_group) {
				ret = ulist_add(tmp, glist->group->qgroupid,
						(uintptr_t)glist->group,
						GFP_ATOMIC);
				if (ret < 0) {
					spin_unlock(&fs_info->qgroup_lock);
					ulist_free(roots);
					goto out;
				}
			}
		}

		spin_unlock(&fs_info->qgroup_lock);
		ulist_free(roots);
		ret = 0;
	}

out:
	btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);

	return ret;
}

static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
{
	struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
						     qgroup_rescan_work);
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans = NULL;
	struct ulist *tmp = NULL;
	struct extent_buffer *scratch_leaf = NULL;
	int err = -ENOMEM;

	path = btrfs_alloc_path();
	if (!path)
		goto out;
	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		goto out;
	scratch_leaf = kmalloc(sizeof(*scratch_leaf), GFP_NOFS);
	if (!scratch_leaf)
		goto out;

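	/*
	 * Scan one leaf per transaction. A positive return from
	 * qgroup_rescan_leaf means the scan is complete, so commit to persist
	 * the final counters; otherwise just end the transaction and loop.
	 */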
	err = 0;
	while (!err) {
		trans = btrfs_start_transaction(fs_info->fs_root, 0);
		if (IS_ERR(trans)) {
			err = PTR_ERR(trans);
			break;
		}
		if (!fs_info->quota_enabled) {
			err = -EINTR;
		} else {
			err = qgroup_rescan_leaf(fs_info, path, trans,
						 tmp, scratch_leaf);
		}
		if (err > 0)
			btrfs_commit_transaction(trans, fs_info->fs_root);
		else
			btrfs_end_transaction(trans, fs_info->fs_root);
	}

out:
	kfree(scratch_leaf);
	ulist_free(tmp);
	btrfs_free_path(path);

	mutex_lock(&fs_info->qgroup_rescan_lock);
	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;

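	/*
	 * Only a fully completed scan may clear the inconsistent flag; any
	 * error leaves the counters marked inconsistent.
	 */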
	if (err == 2 &&
	    fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	} else if (err < 0) {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	}
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	if (err >= 0) {
		pr_info("btrfs: qgroup scan completed%s\n",
			err == 2 ? " (inconsistency flag cleared)" : "");
	} else {
		pr_err("btrfs: qgroup scan failed with %d\n", err);
	}

	complete_all(&fs_info->qgroup_rescan_completion);
}

/*
 * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all
 * memory required for the rescan context.
 */
static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
		   int init_flags)
{
	int ret = 0;

	if (!init_flags &&
	    (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) ||
	     !(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))) {
		ret = -EINVAL;
		goto err;
	}

	mutex_lock(&fs_info->qgroup_rescan_lock);
	spin_lock(&fs_info->qgroup_lock);

	if (init_flags) {
		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
			ret = -EINPROGRESS;
		else if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
			ret = -EINVAL;

		if (ret) {
			spin_unlock(&fs_info->qgroup_lock);
			mutex_unlock(&fs_info->qgroup_rescan_lock);
			goto err;
		}

		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN;
	}

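	/*
	 * Scanning (re)starts at progress_objectid: 0 for a fresh rescan,
	 * or the progress previously stored on disk when resuming.
	 */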
	memset(&fs_info->qgroup_rescan_progress, 0,
	       sizeof(fs_info->qgroup_rescan_progress));
	fs_info->qgroup_rescan_progress.objectid = progress_objectid;

	spin_unlock(&fs_info->qgroup_lock);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	init_completion(&fs_info->qgroup_rescan_completion);

	memset(&fs_info->qgroup_rescan_work, 0,
	       sizeof(fs_info->qgroup_rescan_work));
	fs_info->qgroup_rescan_work.func = btrfs_qgroup_rescan_worker;

	if (ret) {
err:
		pr_info("btrfs: qgroup_rescan_init failed with %d\n", ret);
		return ret;
	}

	return 0;
}

static void
qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info)
{
	struct rb_node *n;
	struct btrfs_qgroup *qgroup;

	spin_lock(&fs_info->qgroup_lock);
	/* clear all current qgroup tracking information */
	for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		qgroup->rfer = 0;
		qgroup->rfer_cmpr = 0;
		qgroup->excl = 0;
		qgroup->excl_cmpr = 0;
	}
	spin_unlock(&fs_info->qgroup_lock);
}

int
btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
{
	int ret = 0;
	struct btrfs_trans_handle *trans;

	ret = qgroup_rescan_init(fs_info, 0, 1);
	if (ret)
		return ret;

	/*
	 * We have set the rescan_progress to 0, which means no more
	 * delayed refs will be accounted by btrfs_qgroup_account_ref.
	 * However, a btrfs_qgroup_account_ref call may already be past its
	 * call to btrfs_find_all_roots, in which case it would still do the
	 * accounting.
	 * To solve this, we're committing the transaction, which will
	 * ensure we run all delayed refs and only after that, we are
	 * going to clear all tracking information for a clean start.
	 */

	trans = btrfs_join_transaction(fs_info->fs_root);
	if (IS_ERR(trans)) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
		return PTR_ERR(trans);
	}
	ret = btrfs_commit_transaction(trans, fs_info->fs_root);
	if (ret) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
		return ret;
	}

	qgroup_rescan_zero_tracking(fs_info);

	btrfs_queue_worker(&fs_info->qgroup_rescan_workers,
			   &fs_info->qgroup_rescan_work);

	return 0;
}

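/*
 * wait for a running rescan worker, if any, to finish. Returns 0 when no
 * rescan was running or once it has completed, or -ERESTARTSYS from
 * wait_for_completion_interruptible() if the wait was interrupted.
 */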
int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info)
{
	int running;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_rescan_lock);
	spin_lock(&fs_info->qgroup_lock);
	running = fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN;
	spin_unlock(&fs_info->qgroup_lock);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	if (running)
		ret = wait_for_completion_interruptible(
			&fs_info->qgroup_rescan_completion);

	return ret;
}

/*
 * this is only called from open_ctree where we're still single threaded, thus
 * locking is omitted here.
 */
void
btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
{
	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
		btrfs_queue_worker(&fs_info->qgroup_rescan_workers,
				   &fs_info->qgroup_rescan_work);
}