/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/btrfs.h>

#include "ctree.h"
#include "transaction.h"
#include "disk-io.h"
#include "locking.h"
#include "ulist.h"
#include "backref.h"
#include "extent_io.h"
#include "qgroup.h"

/* TODO XXX FIXME
 *  - subvol delete -> delete when ref goes to 0? delete limits also?
 *  - reorganize keys
 *  - compressed
 *  - sync
 *  - copy also limits on subvol creation
 *  - limit
 *  - caches for ulists
 *  - performance benchmarks
 *  - check all ioctl parameters
 */

/*
 * one struct for each qgroup, organized in fs_info->qgroup_tree.
 */
struct btrfs_qgroup {
	u64 qgroupid;

	/*
	 * state
	 */
	u64 rfer;	/* referenced */
	u64 rfer_cmpr;	/* referenced compressed */
	u64 excl;	/* exclusive */
	u64 excl_cmpr;	/* exclusive compressed */

	/*
	 * limits
	 */
	u64 lim_flags;	/* which limits are set */
	u64 max_rfer;
	u64 max_excl;
	u64 rsv_rfer;
	u64 rsv_excl;

	/*
	 * reservation tracking
	 */
	u64 reserved;

	/*
	 * lists
	 */
	struct list_head groups;  /* groups this group is member of */
	struct list_head members; /* groups that are members of this group */
	struct list_head dirty;   /* dirty groups */
	struct rb_node node;	  /* tree of qgroups */

	/*
	 * temp variables for accounting operations
	 * Refer to qgroup_shared_accounting() for details.
	 */
	u64 old_refcnt;
	u64 new_refcnt;
};

static void btrfs_qgroup_update_old_refcnt(struct btrfs_qgroup *qg, u64 seq,
					   int mod)
{
	if (qg->old_refcnt < seq)
		qg->old_refcnt = seq;
	qg->old_refcnt += mod;
}

static void btrfs_qgroup_update_new_refcnt(struct btrfs_qgroup *qg, u64 seq,
					   int mod)
{
	if (qg->new_refcnt < seq)
		qg->new_refcnt = seq;
	qg->new_refcnt += mod;
}

static inline u64 btrfs_qgroup_get_old_refcnt(struct btrfs_qgroup *qg, u64 seq)
{
	if (qg->old_refcnt < seq)
		return 0;
	return qg->old_refcnt - seq;
}

static inline u64 btrfs_qgroup_get_new_refcnt(struct btrfs_qgroup *qg, u64 seq)
{
	if (qg->new_refcnt < seq)
		return 0;
	return qg->new_refcnt - seq;
}

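/*
 * Note on the refcnt helpers above: old_refcnt/new_refcnt are never reset
 * between accounting passes. Instead, the caller's sequence number acts as
 * the implicit zero: a stored value below @seq means "untouched in this
 * pass", so the getters return 0, and the updaters first snap the counter
 * up to @seq before adding the modifier. That way the counters of all
 * qgroups don't have to be cleared for every extent that gets accounted.
 */
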
/*
 * glue structure to represent the relations between qgroups.
 */
struct btrfs_qgroup_list {
	struct list_head next_group;
	struct list_head next_member;
	struct btrfs_qgroup *group;
	struct btrfs_qgroup *member;
};

#define ptr_to_u64(x) ((u64)(uintptr_t)x)
#define u64_to_ptr(x) ((struct btrfs_qgroup *)(uintptr_t)x)

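/*
 * These casts let a struct btrfs_qgroup pointer ride in the u64 'aux' field
 * of a ulist node, so the temporary ulists used while walking the qgroup
 * hierarchy (see __qgroup_excl_accounting() below) can hand back the qgroup
 * itself without another rbtree lookup.
 */
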
static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
		   int init_flags);
static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info);

/* must be called with qgroup_ioctl_lock held */
static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
					   u64 qgroupid)
{
	struct rb_node *n = fs_info->qgroup_tree.rb_node;
	struct btrfs_qgroup *qgroup;

	while (n) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		if (qgroup->qgroupid < qgroupid)
			n = n->rb_left;
		else if (qgroup->qgroupid > qgroupid)
			n = n->rb_right;
		else
			return qgroup;
	}
	return NULL;
}

/* must be called with qgroup_lock held */
static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
					  u64 qgroupid)
{
	struct rb_node **p = &fs_info->qgroup_tree.rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_qgroup *qgroup;

	while (*p) {
		parent = *p;
		qgroup = rb_entry(parent, struct btrfs_qgroup, node);

		if (qgroup->qgroupid < qgroupid)
			p = &(*p)->rb_left;
		else if (qgroup->qgroupid > qgroupid)
			p = &(*p)->rb_right;
		else
			return qgroup;
	}

	qgroup = kzalloc(sizeof(*qgroup), GFP_ATOMIC);
	if (!qgroup)
		return ERR_PTR(-ENOMEM);

	qgroup->qgroupid = qgroupid;
	INIT_LIST_HEAD(&qgroup->groups);
	INIT_LIST_HEAD(&qgroup->members);
	INIT_LIST_HEAD(&qgroup->dirty);

	rb_link_node(&qgroup->node, parent, p);
	rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);

	return qgroup;
}

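/*
 * find_qgroup_rb() and add_qgroup_rb() use the same (inverted-looking)
 * comparison direction, so lookups and insertions agree on the tree order.
 * The GFP_ATOMIC allocation is needed because add_qgroup_rb()'s callers
 * hold the fs_info->qgroup_lock spinlock.
 */
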
static void __del_qgroup_rb(struct btrfs_qgroup *qgroup)
{
	struct btrfs_qgroup_list *list;

	list_del(&qgroup->dirty);
	while (!list_empty(&qgroup->groups)) {
		list = list_first_entry(&qgroup->groups,
					struct btrfs_qgroup_list, next_group);
		list_del(&list->next_group);
		list_del(&list->next_member);
		kfree(list);
	}

	while (!list_empty(&qgroup->members)) {
		list = list_first_entry(&qgroup->members,
					struct btrfs_qgroup_list, next_member);
		list_del(&list->next_group);
		list_del(&list->next_member);
		kfree(list);
	}
	kfree(qgroup);
}

/* must be called with qgroup_lock held */
static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
{
	struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);

	if (!qgroup)
		return -ENOENT;

	rb_erase(&qgroup->node, &fs_info->qgroup_tree);
	__del_qgroup_rb(qgroup);
	return 0;
}

/* must be called with qgroup_lock held */
static int add_relation_rb(struct btrfs_fs_info *fs_info,
			   u64 memberid, u64 parentid)
{
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup_list *list;

	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);
	if (!member || !parent)
		return -ENOENT;

	list = kzalloc(sizeof(*list), GFP_ATOMIC);
	if (!list)
		return -ENOMEM;

	list->group = parent;
	list->member = member;
	list_add_tail(&list->next_group, &member->groups);
	list_add_tail(&list->next_member, &parent->members);

	return 0;
}

/* must be called with qgroup_lock held */
static int del_relation_rb(struct btrfs_fs_info *fs_info,
			   u64 memberid, u64 parentid)
{
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup_list *list;

	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);
	if (!member || !parent)
		return -ENOENT;

	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			list_del(&list->next_group);
			list_del(&list->next_member);
			kfree(list);
			return 0;
		}
	}
	return -ENOENT;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
			       u64 rfer, u64 excl)
{
	struct btrfs_qgroup *qgroup;

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup)
		return -EINVAL;
	if (qgroup->rfer != rfer || qgroup->excl != excl)
		return -EINVAL;
	return 0;
}
#endif

/*
 * The full config is read in one go, only called from open_ctree()
 * It doesn't use any locking, as at this point we're still single-threaded
 */
int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
{
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path = NULL;
	struct extent_buffer *l;
	int slot;
	int ret = 0;
	u64 flags = 0;
	u64 rescan_progress = 0;

	if (!fs_info->quota_enabled)
		return 0;

	fs_info->qgroup_ulist = ulist_alloc(GFP_NOFS);
	if (!fs_info->qgroup_ulist) {
		ret = -ENOMEM;
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	/* default this to quota off, in case no status key is found */
	fs_info->qgroup_flags = 0;

	/*
	 * pass 1: read status, all qgroup infos and limits
	 */
	key.objectid = 0;
	key.type = 0;
	key.offset = 0;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
	if (ret)
		goto out;

	while (1) {
		struct btrfs_qgroup *qgroup;

		slot = path->slots[0];
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
			struct btrfs_qgroup_status_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_status_item);

			if (btrfs_qgroup_status_version(l, ptr) !=
			    BTRFS_QGROUP_STATUS_VERSION) {
				btrfs_err(fs_info,
					  "old qgroup version, quota disabled");
				goto out;
			}
			if (btrfs_qgroup_status_generation(l, ptr) !=
			    fs_info->generation) {
				flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
				btrfs_err(fs_info,
					  "qgroup generation mismatch, marked as inconsistent");
			}
			fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
									  ptr);
			rescan_progress = btrfs_qgroup_status_rescan(l, ptr);
			goto next1;
		}

		if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
		    found_key.type != BTRFS_QGROUP_LIMIT_KEY)
			goto next1;

		qgroup = find_qgroup_rb(fs_info, found_key.offset);
		if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
		    (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
			btrfs_err(fs_info, "inconsistent qgroup config");
			flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		}
		if (!qgroup) {
			qgroup = add_qgroup_rb(fs_info, found_key.offset);
			if (IS_ERR(qgroup)) {
				ret = PTR_ERR(qgroup);
				goto out;
			}
		}
		switch (found_key.type) {
		case BTRFS_QGROUP_INFO_KEY: {
			struct btrfs_qgroup_info_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_info_item);
			qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
			qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
			qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
			qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
			/* generation currently unused */
			break;
		}
		case BTRFS_QGROUP_LIMIT_KEY: {
			struct btrfs_qgroup_limit_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_limit_item);
			qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
			qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
			qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
			qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
			qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
			break;
		}
		}
next1:
		ret = btrfs_next_item(quota_root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;
	}
	btrfs_release_path(path);

	/*
	 * pass 2: read all qgroup relations
	 */
	key.objectid = 0;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = 0;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
	if (ret)
		goto out;
	while (1) {
		slot = path->slots[0];
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
			goto next2;

		if (found_key.objectid > found_key.offset) {
			/* parent <- member, not needed to build config */
			/* FIXME should we omit the key completely? */
			goto next2;
		}

		ret = add_relation_rb(fs_info, found_key.objectid,
				      found_key.offset);
		if (ret == -ENOENT) {
			btrfs_warn(fs_info,
				   "orphan qgroup relation 0x%llx->0x%llx",
				   found_key.objectid, found_key.offset);
			ret = 0;	/* ignore the error */
		}
		if (ret)
			goto out;
next2:
		ret = btrfs_next_item(quota_root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;
	}
out:
	fs_info->qgroup_flags |= flags;
	if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)) {
		fs_info->quota_enabled = 0;
		fs_info->pending_quota_state = 0;
	} else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
		   ret >= 0) {
		ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
	}
	btrfs_free_path(path);

	if (ret < 0) {
		ulist_free(fs_info->qgroup_ulist);
		fs_info->qgroup_ulist = NULL;
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
	}

	return ret < 0 ? ret : 0;
}

/*
 * This is called from close_ctree() or open_ctree() or btrfs_quota_disable();
 * the first two are in single-threaded paths. For the third one, we have set
 * quota_root to be null with qgroup_lock held before, so it is safe to clean
 * up the in-memory structures without qgroup_lock held.
 */
void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
{
	struct rb_node *n;
	struct btrfs_qgroup *qgroup;

	while ((n = rb_first(&fs_info->qgroup_tree))) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		rb_erase(n, &fs_info->qgroup_tree);
		__del_qgroup_rb(qgroup);
	}
	/*
	 * we call btrfs_free_qgroup_config() when umounting the
	 * filesystem and when disabling quota, so we set qgroup_ulist
	 * to be null here to avoid a double free.
	 */
	ulist_free(fs_info->qgroup_ulist);
	fs_info->qgroup_ulist = NULL;
}

static int add_qgroup_relation_item(struct btrfs_trans_handle *trans,
				    struct btrfs_root *quota_root,
				    u64 src, u64 dst)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = src;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = dst;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);

	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_free_path(path);
	return ret;
}

static int del_qgroup_relation_item(struct btrfs_trans_handle *trans,
				    struct btrfs_root *quota_root,
				    u64 src, u64 dst)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = src;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = dst;

	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);
out:
	btrfs_free_path(path);
	return ret;
}

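/*
 * A qgroup relation is recorded twice in the quota tree, as (src, dst) and
 * as (dst, src), so it can be found from either end. The relation ioctls
 * below insert and delete both directions, while btrfs_read_qgroup_config()
 * skips the parent <- member copy when rebuilding the in-memory config.
 */
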
static int add_qgroup_item(struct btrfs_trans_handle *trans,
			   struct btrfs_root *quota_root, u64 qgroupid)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_qgroup_info_item *qgroup_info;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	if (btrfs_test_is_dummy_root(quota_root))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;

	/*
	 * Avoid a transaction abort by catching -EEXIST here. In that
	 * case, we proceed by re-initializing the existing structure
	 * on disk.
	 */

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_info));
	if (ret && ret != -EEXIST)
		goto out;

	leaf = path->nodes[0];
	qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);

	btrfs_mark_buffer_dirty(leaf);

	btrfs_release_path(path);

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_limit));
	if (ret && ret != -EEXIST)
		goto out;

	leaf = path->nodes[0];
	qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);

	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

static int del_qgroup_item(struct btrfs_trans_handle *trans,
			   struct btrfs_root *quota_root, u64 qgroupid)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);
	if (ret)
		goto out;

	btrfs_release_path(path);

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);

out:
	btrfs_free_path(path);
	return ret;
}

static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_qgroup *qgroup)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_LIMIT_KEY;
	key.offset = qgroup->qgroupid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	qgroup_limit = btrfs_item_ptr(l, slot, struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(l, qgroup_limit, qgroup->lim_flags);
	btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, qgroup->max_rfer);
	btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, qgroup->max_excl);
	btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer);
	btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}

static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_qgroup *qgroup)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_info_item *qgroup_info;
	int ret;
	int slot;

	if (btrfs_test_is_dummy_root(root))
		return 0;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroup->qgroupid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	qgroup_info = btrfs_item_ptr(l, slot, struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
	btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
	btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
	btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}

static int update_qgroup_status_item(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info,
				     struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_status_item *ptr;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags);
	btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
	btrfs_set_qgroup_status_rescan(l, ptr,
				fs_info->qgroup_rescan_progress.objectid);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * called with qgroup_lock held
 */
static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *leaf = NULL;
	int ret;
	int nr = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;

	key.objectid = 0;
	key.offset = 0;
	key.type = 0;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			goto out;
		leaf = path->nodes[0];
		nr = btrfs_header_nritems(leaf);
		if (!nr)
			break;
		/*
		 * delete the leaf one by one
		 * since the whole tree is going
		 * to be deleted.
		 */
		path->slots[0] = 0;
		ret = btrfs_del_items(trans, root, path, 0, nr);
		if (ret)
			goto out;

		btrfs_release_path(path);
	}
	ret = 0;
out:
	root->fs_info->pending_quota_state = 0;
	btrfs_free_path(path);
	return ret;
}

int btrfs_quota_enable(struct btrfs_trans_handle *trans,
		       struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *quota_root;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_path *path = NULL;
	struct btrfs_qgroup_status_item *ptr;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_qgroup *qgroup = NULL;
	int ret = 0;
	int slot;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (fs_info->quota_root) {
		fs_info->pending_quota_state = 1;
		goto out;
	}

	fs_info->qgroup_ulist = ulist_alloc(GFP_NOFS);
	if (!fs_info->qgroup_ulist) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * initially create the quota tree
	 */
	quota_root = btrfs_create_tree(trans, fs_info,
				       BTRFS_QUOTA_TREE_OBJECTID);
	if (IS_ERR(quota_root)) {
		ret = PTR_ERR(quota_root);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out_free_root;
	}

	key.objectid = 0;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	key.offset = 0;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*ptr));
	if (ret)
		goto out_free_path;

	leaf = path->nodes[0];
	ptr = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
	btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
	fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON |
				BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags);
	btrfs_set_qgroup_status_rescan(leaf, ptr, 0);

	btrfs_mark_buffer_dirty(leaf);

	key.objectid = 0;
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = 0;

	btrfs_release_path(path);
	ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
	if (ret > 0)
		goto out_add_root;
	if (ret < 0)
		goto out_free_path;

	while (1) {
		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.type == BTRFS_ROOT_REF_KEY) {
			ret = add_qgroup_item(trans, quota_root,
					      found_key.offset);
			if (ret)
				goto out_free_path;

			qgroup = add_qgroup_rb(fs_info, found_key.offset);
			if (IS_ERR(qgroup)) {
				ret = PTR_ERR(qgroup);
				goto out_free_path;
			}
		}
		ret = btrfs_next_item(tree_root, path);
		if (ret < 0)
			goto out_free_path;
		if (ret)
			break;
	}

out_add_root:
	btrfs_release_path(path);
	ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
	if (ret)
		goto out_free_path;

	qgroup = add_qgroup_rb(fs_info, BTRFS_FS_TREE_OBJECTID);
	if (IS_ERR(qgroup)) {
		ret = PTR_ERR(qgroup);
		goto out_free_path;
	}
	spin_lock(&fs_info->qgroup_lock);
	fs_info->quota_root = quota_root;
	fs_info->pending_quota_state = 1;
	spin_unlock(&fs_info->qgroup_lock);
out_free_path:
	btrfs_free_path(path);
out_free_root:
	if (ret) {
		free_extent_buffer(quota_root->node);
		free_extent_buffer(quota_root->commit_root);
		kfree(quota_root);
	}
out:
	if (ret) {
		ulist_free(fs_info->qgroup_ulist);
		fs_info->qgroup_ulist = NULL;
	}
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

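/*
 * Enabling quotas creates the quota tree, writes the initial status item
 * and then seeds one level-0 qgroup per existing subvolume by scanning the
 * ROOT_REF keys in the tree root (plus one for the default fs tree). The
 * new quota_root only becomes visible, under qgroup_lock, at the very end.
 */
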
int btrfs_quota_disable(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *quota_root;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root)
		goto out;
	spin_lock(&fs_info->qgroup_lock);
	fs_info->quota_enabled = 0;
	fs_info->pending_quota_state = 0;
	quota_root = fs_info->quota_root;
	fs_info->quota_root = NULL;
	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
	spin_unlock(&fs_info->qgroup_lock);

	btrfs_free_qgroup_config(fs_info);

	ret = btrfs_clean_quota_tree(trans, quota_root);
	if (ret)
		goto out;

	ret = btrfs_del_root(trans, tree_root, &quota_root->root_key);
	if (ret)
		goto out;

	list_del(&quota_root->dirty_list);

	btrfs_tree_lock(quota_root->node);
	clean_tree_block(trans, tree_root->fs_info, quota_root->node);
	btrfs_tree_unlock(quota_root->node);
	btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);

	free_extent_buffer(quota_root->node);
	free_extent_buffer(quota_root->commit_root);
	kfree(quota_root);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

static void qgroup_dirty(struct btrfs_fs_info *fs_info,
			 struct btrfs_qgroup *qgroup)
{
	if (list_empty(&qgroup->dirty))
		list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
}

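/*
 * qgroup_dirty() only queues the qgroup on fs_info->dirty_qgroups; the
 * updated counters are written back to the quota tree later, during
 * transaction commit, via the update_qgroup_*_item() helpers above.
 */
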
/*
 * The easy accounting: if we are adding/removing the only ref for an extent
 * then this qgroup and all of the parent qgroups get their reference and
 * exclusive counts adjusted.
 *
 * Caller should hold fs_info->qgroup_lock.
 */
static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
				    struct ulist *tmp, u64 ref_root,
				    u64 num_bytes, int sign)
{
	struct btrfs_qgroup *qgroup;
	struct btrfs_qgroup_list *glist;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	int ret = 0;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	qgroup->rfer += sign * num_bytes;
	qgroup->rfer_cmpr += sign * num_bytes;

	WARN_ON(sign < 0 && qgroup->excl < num_bytes);
	qgroup->excl += sign * num_bytes;
	qgroup->excl_cmpr += sign * num_bytes;
	if (sign > 0)
		qgroup->reserved -= num_bytes;

	qgroup_dirty(fs_info, qgroup);

	/* Get all of the parent groups that contain this qgroup */
	list_for_each_entry(glist, &qgroup->groups, next_group) {
		ret = ulist_add(tmp, glist->group->qgroupid,
				ptr_to_u64(glist->group), GFP_ATOMIC);
		if (ret < 0)
			goto out;
	}

	/* Iterate all of the parents and adjust their reference counts */
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(tmp, &uiter))) {
		qgroup = u64_to_ptr(unode->aux);
		qgroup->rfer += sign * num_bytes;
		qgroup->rfer_cmpr += sign * num_bytes;
		WARN_ON(sign < 0 && qgroup->excl < num_bytes);
		qgroup->excl += sign * num_bytes;
		if (sign > 0)
			qgroup->reserved -= num_bytes;
		qgroup->excl_cmpr += sign * num_bytes;
		qgroup_dirty(fs_info, qgroup);

		/* Add any parents of the parents */
		list_for_each_entry(glist, &qgroup->groups, next_group) {
			ret = ulist_add(tmp, glist->group->qgroupid,
					ptr_to_u64(glist->group), GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}
	ret = 0;
out:
	return ret;
}

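/*
 * In __qgroup_excl_accounting() the @tmp ulist doubles as worklist and
 * visited set: ulist_add() refuses duplicates, so a parent reachable
 * through several membership paths is still adjusted only once.
 */
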
/*
 * Quick path for updating a qgroup with only exclusive refs.
 *
 * In that case, just updating all parents will be enough.
 * Otherwise we need to do a full rescan.
 * Caller should also hold fs_info->qgroup_lock.
 *
 * Return 0 for a quick update, >0 if a full rescan is needed
 * (the INCONSISTENT flag is set in that case).
 * Return < 0 for other errors.
 */
static int quick_update_accounting(struct btrfs_fs_info *fs_info,
				   struct ulist *tmp, u64 src, u64 dst,
				   int sign)
{
	struct btrfs_qgroup *qgroup;
	int ret = 1;
	int err = 0;

	qgroup = find_qgroup_rb(fs_info, src);
	if (!qgroup)
		goto out;
	if (qgroup->excl == qgroup->rfer) {
		ret = 0;
		err = __qgroup_excl_accounting(fs_info, tmp, dst,
					       qgroup->excl, sign);
		if (err < 0) {
			ret = err;
			goto out;
		}
	}
out:
	if (ret)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	return ret;
}

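/*
 * The quick path above only applies when the source qgroup's referenced and
 * exclusive counts are equal, i.e. nothing it references is shared; only
 * then can the parents be adjusted by a simple +/- qgroup->excl without a
 * full rescan.
 */
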
int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info, u64 src, u64 dst)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup *member;
	struct btrfs_qgroup_list *list;
	struct ulist *tmp;
	int ret = 0;

	/* Check the level of src and dst first */
	if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst))
		return -EINVAL;

	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		return -ENOMEM;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}
	member = find_qgroup_rb(fs_info, src);
	parent = find_qgroup_rb(fs_info, dst);
	if (!member || !parent) {
		ret = -EINVAL;
		goto out;
	}

	/* check if such a qgroup relation exists already */
	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			ret = -EEXIST;
			goto out;
		}
	}

	ret = add_qgroup_relation_item(trans, quota_root, src, dst);
	if (ret)
		goto out;

	ret = add_qgroup_relation_item(trans, quota_root, dst, src);
	if (ret) {
		del_qgroup_relation_item(trans, quota_root, src, dst);
		goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	ret = add_relation_rb(quota_root->fs_info, src, dst);
	if (ret < 0) {
		spin_unlock(&fs_info->qgroup_lock);
		goto out;
	}
	ret = quick_update_accounting(fs_info, tmp, src, dst, 1);
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	ulist_free(tmp);
	return ret;
}

int __del_qgroup_relation(struct btrfs_trans_handle *trans,
			  struct btrfs_fs_info *fs_info, u64 src, u64 dst)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup *member;
	struct btrfs_qgroup_list *list;
	struct ulist *tmp;
	int ret = 0;
	int err;

	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		return -ENOMEM;

	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	member = find_qgroup_rb(fs_info, src);
	parent = find_qgroup_rb(fs_info, dst);
	if (!member || !parent) {
		ret = -EINVAL;
		goto out;
	}

	/* check if such a qgroup relation exists already */
	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent)
			goto exist;
	}
	ret = -ENOENT;
	goto out;
exist:
	ret = del_qgroup_relation_item(trans, quota_root, src, dst);
	err = del_qgroup_relation_item(trans, quota_root, dst, src);
	if (err && !ret)
		ret = err;

	spin_lock(&fs_info->qgroup_lock);
	del_relation_rb(fs_info, src, dst);
	ret = quick_update_accounting(fs_info, tmp, src, dst, -1);
	spin_unlock(&fs_info->qgroup_lock);
out:
	ulist_free(tmp);
	return ret;
}

int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info, u64 src, u64 dst)
{
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	ret = __del_qgroup_relation(trans, fs_info, src, dst);
	mutex_unlock(&fs_info->qgroup_ioctl_lock);

	return ret;
}

int btrfs_create_qgroup(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info, u64 qgroupid)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}
	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (qgroup) {
		ret = -EEXIST;
		goto out;
	}

	ret = add_qgroup_item(trans, quota_root, qgroupid);
	if (ret)
		goto out;

	spin_lock(&fs_info->qgroup_lock);
	qgroup = add_qgroup_rb(fs_info, qgroupid);
	spin_unlock(&fs_info->qgroup_lock);

	if (IS_ERR(qgroup))
		ret = PTR_ERR(qgroup);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info, u64 qgroupid)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	struct btrfs_qgroup_list *list;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup) {
		ret = -ENOENT;
		goto out;
	} else {
		/* check if there are no children of this qgroup */
		if (!list_empty(&qgroup->members)) {
			ret = -EBUSY;
			goto out;
		}
	}
	ret = del_qgroup_item(trans, quota_root, qgroupid);

	while (!list_empty(&qgroup->groups)) {
		list = list_first_entry(&qgroup->groups,
					struct btrfs_qgroup_list, next_group);
		ret = __del_qgroup_relation(trans, fs_info,
					    qgroupid,
					    list->group->qgroupid);
		if (ret)
			goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	del_qgroup_rb(quota_root->fs_info, qgroupid);
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

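/*
 * Removal refuses qgroups that still have members (-EBUSY); relations to
 * the qgroup's own parents are torn down one by one via
 * __del_qgroup_relation() before the rbtree entry is dropped.
 */
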
int btrfs_limit_qgroup(struct btrfs_trans_handle *trans,
		       struct btrfs_fs_info *fs_info, u64 qgroupid,
		       struct btrfs_qgroup_limit *limit)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup) {
		ret = -ENOENT;
		goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER)
		qgroup->max_rfer = limit->max_rfer;
	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL)
		qgroup->max_excl = limit->max_excl;
	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER)
		qgroup->rsv_rfer = limit->rsv_rfer;
	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL)
		qgroup->rsv_excl = limit->rsv_excl;
	qgroup->lim_flags |= limit->flags;

	spin_unlock(&fs_info->qgroup_lock);

	ret = update_qgroup_limit_item(trans, quota_root, qgroup);
	if (ret) {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		btrfs_info(fs_info, "unable to update quota limit for %llu",
			   qgroupid);
	}

out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

static int comp_oper_exist(struct btrfs_qgroup_operation *oper1,
			   struct btrfs_qgroup_operation *oper2)
{
	/*
	 * Ignore seq and type here, we're looking for any operation
	 * at all related to this extent on that root.
	 */
	if (oper1->bytenr < oper2->bytenr)
		return -1;
	if (oper1->bytenr > oper2->bytenr)
		return 1;
	if (oper1->ref_root < oper2->ref_root)
		return -1;
	if (oper1->ref_root > oper2->ref_root)
		return 1;
	return 0;
}

static int qgroup_oper_exists(struct btrfs_fs_info *fs_info,
			      struct btrfs_qgroup_operation *oper)
{
	struct rb_node *n;
	struct btrfs_qgroup_operation *cur;
	int cmp;

	spin_lock(&fs_info->qgroup_op_lock);
	n = fs_info->qgroup_op_tree.rb_node;
	while (n) {
		cur = rb_entry(n, struct btrfs_qgroup_operation, n);
		cmp = comp_oper_exist(cur, oper);
		if (cmp < 0) {
			n = n->rb_right;
		} else if (cmp) {
			n = n->rb_left;
		} else {
			spin_unlock(&fs_info->qgroup_op_lock);
			return -EEXIST;
		}
	}
	spin_unlock(&fs_info->qgroup_op_lock);
	return 0;
}

2014-05-14 00:30:47 +00:00
|
|
|
static int comp_oper(struct btrfs_qgroup_operation *oper1,
|
|
|
|
struct btrfs_qgroup_operation *oper2)
|
|
|
|
{
|
|
|
|
if (oper1->bytenr < oper2->bytenr)
|
|
|
|
return -1;
|
|
|
|
if (oper1->bytenr > oper2->bytenr)
|
|
|
|
return 1;
|
|
|
|
if (oper1->ref_root < oper2->ref_root)
|
|
|
|
return -1;
|
|
|
|
if (oper1->ref_root > oper2->ref_root)
|
|
|
|
return 1;
|
Btrfs: change the insertion criteria for the qgroup operations rbtree
After looking at Liu Bo's recent patch (titled
"Btrfs: fix comp_oper to get right order") I realized the search made by
qgroup_oper_exists() was buggy because its rbtree navigation comparison
function, comp_oper_exist(), only looks at the fields bytenr and ref_root
of a tree node, ignoring the seq field completely. This was wrong because
when we insert a node into the rbtree we use comp_oper(), which takes a
decision based first on bytenr, then on seq and then on the ref_root field.
That means qgroup_oper_exists() could miss the fact that at least one
operation with given bytenr and ref_root exists.
Consider the following simple example of a 3 nodes qgroup operations
rbtree (created using comp_oper before this patch), where each node's key
is a tuple with the shape (bytenr, seq, ref_root, op):
                   [ (4096, 2, 20, op X) ]
                     /                \
                    /                  \
   [ (4096, 1, 5, op Y) ]      [ (4096, 3, 10, op Z) ]
qgroup_oper_exists() when called to search for an existing operation for
bytenr 4096 and ref root 10 wouldn't find anything because it would go to
the left subtree instead of the right subtree, since comp_oper_exist()
ignores the seq field completely.
Fix this by changing the insertion navigation function to use the ref_root
field right after using the bytenr field and before using the seq field,
so that qgroup_oper_exists() / comp_oper_exist() work as expected.
This patch applies on top of the patch mentioned above from Liu.
Signed-off-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: Chris Mason <clm@fb.com>
2015-03-14 07:03:27 +00:00
|
|
|
if (oper1->seq < oper2->seq)
|
|
|
|
return -1;
|
|
|
|
if (oper1->seq > oper2->seq)
|
|
|
|
return 1;
|
2014-05-14 00:30:47 +00:00
|
|
|
if (oper1->type < oper2->type)
|
|
|
|
return -1;
|
|
|
|
if (oper1->type > oper2->type)
|
|
|
|
return 1;
|
|
|
|
return 0;
|
|
|
|
}
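/*
 * With the ordering above, operations in the qgroup_op_tree are sorted by
 * (bytenr, ref_root, seq, type), so qgroup_oper_exists() can search on
 * (bytenr, ref_root) alone without missing any entry.
 */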
|
|
|
|
|
|
|
|
static int insert_qgroup_oper(struct btrfs_fs_info *fs_info,
|
|
|
|
struct btrfs_qgroup_operation *oper)
|
|
|
|
{
|
|
|
|
struct rb_node **p;
|
|
|
|
struct rb_node *parent = NULL;
|
|
|
|
struct btrfs_qgroup_operation *cur;
|
|
|
|
int cmp;
|
|
|
|
|
|
|
|
spin_lock(&fs_info->qgroup_op_lock);
|
|
|
|
p = &fs_info->qgroup_op_tree.rb_node;
|
|
|
|
while (*p) {
|
|
|
|
parent = *p;
|
|
|
|
cur = rb_entry(parent, struct btrfs_qgroup_operation, n);
|
|
|
|
cmp = comp_oper(cur, oper);
|
|
|
|
if (cmp < 0) {
|
|
|
|
p = &(*p)->rb_right;
|
|
|
|
} else if (cmp) {
|
|
|
|
p = &(*p)->rb_left;
|
|
|
|
} else {
|
|
|
|
spin_unlock(&fs_info->qgroup_op_lock);
|
|
|
|
return -EEXIST;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
rb_link_node(&oper->n, parent, p);
|
|
|
|
rb_insert_color(&oper->n, &fs_info->qgroup_op_tree);
|
|
|
|
spin_unlock(&fs_info->qgroup_op_lock);
|
|
|
|
return 0;
|
|
|
|
}
|
2012-06-28 16:03:02 +00:00
|
|
|
|
|
|
|
/*
|
2014-05-14 00:30:47 +00:00
|
|
|
* Record a quota operation for processing later on.
|
|
|
|
* @trans: the transaction we are adding the delayed op to.
|
|
|
|
* @fs_info: the fs_info for this fs.
|
|
|
|
* @ref_root: the root of the reference we are acting on,
|
|
|
|
* @bytenr: the bytenr we are acting on.
|
|
|
|
* @num_bytes: the number of bytes in the reference.
|
|
|
|
* @type: the type of operation this is.
|
|
|
|
* @mod_seq: do we need to get a sequence number for looking up roots.
|
|
|
|
*
|
|
|
|
* We just add it to our trans qgroup_ref_list and carry on and process these
|
|
|
|
* operations in order at some later point. If the reference root isn't a fs
|
|
|
|
* root then we don't bother with doing anything.
|
|
|
|
*
|
|
|
|
* MUST BE HOLDING THE REF LOCK.
|
2012-06-28 16:03:02 +00:00
|
|
|
*/
|
|
|
|
int btrfs_qgroup_record_ref(struct btrfs_trans_handle *trans,
|
2014-05-14 00:30:47 +00:00
|
|
|
struct btrfs_fs_info *fs_info, u64 ref_root,
|
|
|
|
u64 bytenr, u64 num_bytes,
|
|
|
|
enum btrfs_qgroup_operation_type type, int mod_seq)
|
2012-06-28 16:03:02 +00:00
|
|
|
{
|
2014-05-14 00:30:47 +00:00
|
|
|
struct btrfs_qgroup_operation *oper;
|
|
|
|
int ret;
|
2012-06-28 16:03:02 +00:00
|
|
|
|
2014-05-14 00:30:47 +00:00
|
|
|
if (!is_fstree(ref_root) || !fs_info->quota_enabled)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
oper = kmalloc(sizeof(*oper), GFP_NOFS);
|
|
|
|
if (!oper)
|
2012-06-28 16:03:02 +00:00
|
|
|
return -ENOMEM;
|
|
|
|
|
2014-05-14 00:30:47 +00:00
|
|
|
oper->ref_root = ref_root;
|
|
|
|
oper->bytenr = bytenr;
|
|
|
|
oper->num_bytes = num_bytes;
|
|
|
|
oper->type = type;
|
|
|
|
oper->seq = atomic_inc_return(&fs_info->qgroup_op_seq);
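/*
 * The per-fs operation sequence keeps otherwise identical (bytenr, ref_root)
 * operations distinct in the op rbtree and preserves their submission order.
 */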
|
|
|
|
INIT_LIST_HEAD(&oper->elem.list);
|
|
|
|
oper->elem.seq = 0;
|
2014-07-17 19:39:01 +00:00
|
|
|
|
2014-07-17 19:39:00 +00:00
|
|
|
trace_btrfs_qgroup_record_ref(oper);
|
|
|
|
|
2014-07-17 19:39:01 +00:00
|
|
|
if (type == BTRFS_QGROUP_OPER_SUB_SUBTREE) {
|
|
|
|
/*
|
|
|
|
* If any operation for this bytenr/ref_root combo
|
|
|
|
* exists, then we know it's not exclusively owned and
|
|
|
|
* shouldn't be queued up.
|
|
|
|
*
|
|
|
|
* This also catches the case where we have a cloned
|
|
|
|
* extent that gets queued up multiple times during
|
|
|
|
* drop snapshot.
|
|
|
|
*/
|
|
|
|
if (qgroup_oper_exists(fs_info, oper)) {
|
|
|
|
kfree(oper);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-05-14 00:30:47 +00:00
|
|
|
ret = insert_qgroup_oper(fs_info, oper);
|
|
|
|
if (ret) {
|
|
|
|
/* Shouldn't happen so have an assert for developers */
|
|
|
|
ASSERT(0);
|
|
|
|
kfree(oper);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
list_add_tail(&oper->list, &trans->qgroup_ref_list);
|
|
|
|
|
|
|
|
if (mod_seq)
|
|
|
|
btrfs_get_tree_mod_seq(fs_info, &oper->elem);
|
2012-06-28 16:03:02 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-04-16 06:34:17 +00:00
|
|
|
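/*
 * Link @record into the delayed-ref dirty extent tree, keyed by bytenr.
 * Returns the record that is already queued for this bytenr, if any (the
 * caller then keeps ownership of @record), or NULL once @record is inserted.
 */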
struct btrfs_qgroup_extent_record
|
|
|
|
*btrfs_qgroup_insert_dirty_extent(struct btrfs_delayed_ref_root *delayed_refs,
|
|
|
|
struct btrfs_qgroup_extent_record *record)
|
|
|
|
{
|
|
|
|
struct rb_node **p = &delayed_refs->dirty_extent_root.rb_node;
|
|
|
|
struct rb_node *parent_node = NULL;
|
|
|
|
struct btrfs_qgroup_extent_record *entry;
|
|
|
|
u64 bytenr = record->bytenr;
|
|
|
|
|
|
|
|
while (*p) {
|
|
|
|
parent_node = *p;
|
|
|
|
entry = rb_entry(parent_node, struct btrfs_qgroup_extent_record,
|
|
|
|
node);
|
|
|
|
if (bytenr < entry->bytenr)
|
|
|
|
p = &(*p)->rb_left;
|
|
|
|
else if (bytenr > entry->bytenr)
|
|
|
|
p = &(*p)->rb_right;
|
|
|
|
else
|
|
|
|
return entry;
|
|
|
|
}
|
|
|
|
|
|
|
|
rb_link_node(&record->node, parent_node, p);
|
|
|
|
rb_insert_color(&record->node, &delayed_refs->dirty_extent_root);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The easy accounting, if we are adding/removing the only ref for an extent
|
|
|
|
* then this qgroup and all of the parent qgroups get their reference and
|
|
|
|
* exclusive counts adjusted.
|
|
|
|
*/
|
2014-05-14 00:30:47 +00:00
|
|
|
static int qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
|
|
|
|
struct btrfs_qgroup_operation *oper)
|
|
|
|
{
|
|
|
|
struct ulist *tmp;
|
|
|
|
int sign = 0;
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
tmp = ulist_alloc(GFP_NOFS);
|
|
|
|
if (!tmp)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
spin_lock(&fs_info->qgroup_lock);
|
|
|
|
if (!fs_info->quota_root)
|
|
|
|
goto out;
|
2015-02-27 08:24:27 +00:00
|
|
|
|
2014-05-14 00:30:47 +00:00
|
|
|
switch (oper->type) {
|
|
|
|
case BTRFS_QGROUP_OPER_ADD_EXCL:
|
|
|
|
sign = 1;
|
|
|
|
break;
|
|
|
|
case BTRFS_QGROUP_OPER_SUB_EXCL:
|
|
|
|
sign = -1;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
ASSERT(0);
|
|
|
|
}
|
2015-02-27 08:24:27 +00:00
|
|
|
ret = __qgroup_excl_accounting(fs_info, tmp, oper->ref_root,
|
|
|
|
oper->num_bytes, sign);
|
2014-05-14 00:30:47 +00:00
|
|
|
out:
|
|
|
|
spin_unlock(&fs_info->qgroup_lock);
|
|
|
|
ulist_free(tmp);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Walk all of the roots that pointed to our bytenr and adjust their refcnts
|
|
|
|
* properly.
|
|
|
|
*/
|
|
|
|
static int qgroup_calc_old_refcnt(struct btrfs_fs_info *fs_info,
|
|
|
|
u64 root_to_skip, struct ulist *tmp,
|
|
|
|
struct ulist *roots, struct ulist *qgroups,
|
|
|
|
u64 seq, int *old_roots, int rescan)
|
2013-04-25 16:04:50 +00:00
|
|
|
{
|
|
|
|
struct ulist_node *unode;
|
|
|
|
struct ulist_iterator uiter;
|
|
|
|
struct ulist_node *tmp_unode;
|
|
|
|
struct ulist_iterator tmp_uiter;
|
|
|
|
struct btrfs_qgroup *qg;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ULIST_ITER_INIT(&uiter);
|
|
|
|
while ((unode = ulist_next(roots, &uiter))) {
|
2014-05-14 00:30:47 +00:00
|
|
|
/* We don't count our current root here */
|
|
|
|
if (unode->val == root_to_skip)
|
|
|
|
continue;
|
2013-04-25 16:04:50 +00:00
|
|
|
qg = find_qgroup_rb(fs_info, unode->val);
|
|
|
|
if (!qg)
|
|
|
|
continue;
|
2014-05-14 00:30:47 +00:00
|
|
|
/*
|
|
|
|
* We could have a pending removal of this same ref so we may
|
|
|
|
* not have actually found our ref root when doing
|
|
|
|
* btrfs_find_all_roots, so we need to keep track of how many
|
|
|
|
* old roots we find in case we removed ours and added a
|
|
|
|
* different one at the same time. I don't think this could
|
|
|
|
* happen in practice but that sort of thinking leads to pain
|
|
|
|
* and suffering and to the dark side.
|
|
|
|
*/
|
|
|
|
(*old_roots)++;
|
2013-04-25 16:04:50 +00:00
|
|
|
|
|
|
|
ulist_reinit(tmp);
|
2014-05-14 00:30:47 +00:00
|
|
|
ret = ulist_add(qgroups, qg->qgroupid, ptr_to_u64(qg),
|
|
|
|
GFP_ATOMIC);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
ret = ulist_add(tmp, qg->qgroupid, ptr_to_u64(qg), GFP_ATOMIC);
|
2013-04-25 16:04:50 +00:00
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
ULIST_ITER_INIT(&tmp_uiter);
|
|
|
|
while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
|
|
|
|
struct btrfs_qgroup_list *glist;
|
2015-03-12 08:10:13 +00:00
|
|
|
int mod;
|
2013-04-25 16:04:50 +00:00
|
|
|
|
2014-05-14 00:30:47 +00:00
|
|
|
qg = u64_to_ptr(tmp_unode->aux);
|
|
|
|
/*
|
|
|
|
* We use this sequence number to keep from having to
|
|
|
|
* run the whole list and 0 out the refcnt every time.
|
|
|
|
* We basically use sequence as the known 0 count and
|
|
|
|
* then add 1 every time we see a qgroup. This is how we
|
|
|
|
* get how many of the roots actually point up to the
|
|
|
|
* upper level qgroups in order to determine exclusive
|
|
|
|
* counts.
|
|
|
|
*
|
2015-03-12 08:10:13 +00:00
|
|
|
* For a rescan, none of the extents were recorded before, so
|
|
|
|
* we just don't add old_refcnt.
|
2014-05-14 00:30:47 +00:00
|
|
|
*/
|
|
|
|
if (rescan)
|
2015-03-12 08:10:13 +00:00
|
|
|
mod = 0;
|
2014-05-14 00:30:47 +00:00
|
|
|
else
|
2015-03-12 08:10:13 +00:00
|
|
|
mod = 1;
|
|
|
|
btrfs_qgroup_update_old_refcnt(qg, seq, mod);
|
|
|
|
btrfs_qgroup_update_new_refcnt(qg, seq, 1);
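/*
 * The other roots keep their reference across this operation, so
 * they count toward the new state as well.
 */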
|
2013-04-25 16:04:50 +00:00
|
|
|
list_for_each_entry(glist, &qg->groups, next_group) {
|
2014-05-14 00:30:47 +00:00
|
|
|
ret = ulist_add(qgroups, glist->group->qgroupid,
|
|
|
|
ptr_to_u64(glist->group),
|
|
|
|
GFP_ATOMIC);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
2013-04-25 16:04:50 +00:00
|
|
|
ret = ulist_add(tmp, glist->group->qgroupid,
|
2014-05-14 00:30:47 +00:00
|
|
|
ptr_to_u64(glist->group),
|
2013-04-25 16:04:50 +00:00
|
|
|
GFP_ATOMIC);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2014-05-14 00:30:47 +00:00
|
|
|
return 0;
|
|
|
|
}
|
2013-04-25 16:04:50 +00:00
|
|
|
|
2014-05-14 00:30:47 +00:00
|
|
|
/*
|
|
|
|
* We need to walk forward in our operation tree and account for any roots that
|
|
|
|
* were deleted after we made this operation.
|
|
|
|
*/
|
|
|
|
static int qgroup_account_deleted_refs(struct btrfs_fs_info *fs_info,
|
|
|
|
struct btrfs_qgroup_operation *oper,
|
|
|
|
struct ulist *tmp,
|
|
|
|
struct ulist *qgroups, u64 seq,
|
|
|
|
int *old_roots)
|
|
|
|
{
|
|
|
|
struct ulist_node *unode;
|
|
|
|
struct ulist_iterator uiter;
|
|
|
|
struct btrfs_qgroup *qg;
|
|
|
|
struct btrfs_qgroup_operation *tmp_oper;
|
|
|
|
struct rb_node *n;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ulist_reinit(tmp);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We only walk forward in the tree since we're only interested in
|
|
|
|
* removals that happened _after_ our operation.
|
|
|
|
*/
|
|
|
|
spin_lock(&fs_info->qgroup_op_lock);
|
|
|
|
n = rb_next(&oper->n);
|
|
|
|
spin_unlock(&fs_info->qgroup_op_lock);
|
|
|
|
if (!n)
|
|
|
|
return 0;
|
|
|
|
tmp_oper = rb_entry(n, struct btrfs_qgroup_operation, n);
|
|
|
|
while (tmp_oper->bytenr == oper->bytenr) {
|
|
|
|
/*
|
|
|
|
* If it's not a removal we don't care, additions work out
|
|
|
|
* properly with our refcnt tracking.
|
|
|
|
*/
|
|
|
|
if (tmp_oper->type != BTRFS_QGROUP_OPER_SUB_SHARED &&
|
|
|
|
tmp_oper->type != BTRFS_QGROUP_OPER_SUB_EXCL)
|
|
|
|
goto next;
|
|
|
|
qg = find_qgroup_rb(fs_info, tmp_oper->ref_root);
|
|
|
|
if (!qg)
|
|
|
|
goto next;
|
|
|
|
ret = ulist_add(qgroups, qg->qgroupid, ptr_to_u64(qg),
|
|
|
|
GFP_ATOMIC);
|
|
|
|
if (ret) {
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
/*
|
|
|
|
* We only want to increase old_roots if this qgroup is
|
|
|
|
* not already in the list of qgroups. If it is already
|
|
|
|
* there then that means it must have been re-added or
|
|
|
|
* the delete will be discarded because we had an
|
|
|
|
* existing ref that we haven't looked up yet. In this
|
|
|
|
* case we don't want to increase old_roots. So if ret
|
|
|
|
* == 1 then we know that this is the first time we've
|
|
|
|
* seen this qgroup and we can bump the old_roots.
|
|
|
|
*/
|
|
|
|
(*old_roots)++;
|
|
|
|
ret = ulist_add(tmp, qg->qgroupid, ptr_to_u64(qg),
|
|
|
|
GFP_ATOMIC);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
next:
|
|
|
|
spin_lock(&fs_info->qgroup_op_lock);
|
|
|
|
n = rb_next(&tmp_oper->n);
|
|
|
|
spin_unlock(&fs_info->qgroup_op_lock);
|
|
|
|
if (!n)
|
|
|
|
break;
|
|
|
|
tmp_oper = rb_entry(n, struct btrfs_qgroup_operation, n);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Ok now process the qgroups we found */
|
|
|
|
ULIST_ITER_INIT(&uiter);
|
|
|
|
while ((unode = ulist_next(tmp, &uiter))) {
|
|
|
|
struct btrfs_qgroup_list *glist;
|
|
|
|
|
|
|
|
qg = u64_to_ptr(unode->aux);
|
2015-03-12 08:10:13 +00:00
|
|
|
btrfs_qgroup_update_old_refcnt(qg, seq, 1);
|
|
|
|
btrfs_qgroup_update_new_refcnt(qg, seq, 1);
|
2014-05-14 00:30:47 +00:00
|
|
|
list_for_each_entry(glist, &qg->groups, next_group) {
|
|
|
|
ret = ulist_add(qgroups, glist->group->qgroupid,
|
|
|
|
ptr_to_u64(glist->group), GFP_ATOMIC);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
ret = ulist_add(tmp, glist->group->qgroupid,
|
|
|
|
ptr_to_u64(glist->group), GFP_ATOMIC);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
}
|
2013-04-25 16:04:50 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2014-05-14 00:30:47 +00:00
|
|
|
/* Add refcnt for the newly added reference. */
|
|
|
|
static int qgroup_calc_new_refcnt(struct btrfs_fs_info *fs_info,
|
|
|
|
struct btrfs_qgroup_operation *oper,
|
|
|
|
struct btrfs_qgroup *qgroup,
|
|
|
|
struct ulist *tmp, struct ulist *qgroups,
|
|
|
|
u64 seq)
|
2013-04-25 16:04:50 +00:00
|
|
|
{
|
|
|
|
struct ulist_node *unode;
|
|
|
|
struct ulist_iterator uiter;
|
|
|
|
struct btrfs_qgroup *qg;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ulist_reinit(tmp);
|
2014-05-14 00:30:47 +00:00
|
|
|
ret = ulist_add(qgroups, qgroup->qgroupid, ptr_to_u64(qgroup),
|
|
|
|
GFP_ATOMIC);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
ret = ulist_add(tmp, qgroup->qgroupid, ptr_to_u64(qgroup),
|
|
|
|
GFP_ATOMIC);
|
2013-04-25 16:04:50 +00:00
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
ULIST_ITER_INIT(&uiter);
|
|
|
|
while ((unode = ulist_next(tmp, &uiter))) {
|
2014-05-14 00:30:47 +00:00
|
|
|
struct btrfs_qgroup_list *glist;
|
2013-04-25 16:04:50 +00:00
|
|
|
|
2014-05-14 00:30:47 +00:00
|
|
|
qg = u64_to_ptr(unode->aux);
|
2015-03-12 08:10:13 +00:00
|
|
|
if (oper->type == BTRFS_QGROUP_OPER_ADD_SHARED)
|
|
|
|
btrfs_qgroup_update_new_refcnt(qg, seq, 1);
|
|
|
|
else
|
|
|
|
btrfs_qgroup_update_old_refcnt(qg, seq, 1);
|
2013-04-25 16:04:50 +00:00
|
|
|
list_for_each_entry(glist, &qg->groups, next_group) {
|
|
|
|
ret = ulist_add(tmp, glist->group->qgroupid,
|
2014-05-14 00:30:47 +00:00
|
|
|
ptr_to_u64(glist->group), GFP_ATOMIC);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
ret = ulist_add(qgroups, glist->group->qgroupid,
|
|
|
|
ptr_to_u64(glist->group), GFP_ATOMIC);
|
2013-04-25 16:04:50 +00:00
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-04-12 08:52:34 +00:00
|
|
|
#define UPDATE_NEW 0
|
|
|
|
#define UPDATE_OLD 1
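/*
 * Selector for qgroup_update_refcnt() below: bump old_refcnt when called
 * with UPDATE_OLD and new_refcnt when called with UPDATE_NEW, for every
 * qgroup reachable from the given roots.
 */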
|
|
|
|
/*
|
|
|
|
* Walk all of the roots that point to the bytenr and adjust their refcnts.
|
|
|
|
*/
|
|
|
|
static int qgroup_update_refcnt(struct btrfs_fs_info *fs_info,
|
|
|
|
struct ulist *roots, struct ulist *tmp,
|
|
|
|
struct ulist *qgroups, u64 seq, int update_old)
|
|
|
|
{
|
|
|
|
struct ulist_node *unode;
|
|
|
|
struct ulist_iterator uiter;
|
|
|
|
struct ulist_node *tmp_unode;
|
|
|
|
struct ulist_iterator tmp_uiter;
|
|
|
|
struct btrfs_qgroup *qg;
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
if (!roots)
|
|
|
|
return 0;
|
|
|
|
ULIST_ITER_INIT(&uiter);
|
|
|
|
while ((unode = ulist_next(roots, &uiter))) {
|
|
|
|
qg = find_qgroup_rb(fs_info, unode->val);
|
|
|
|
if (!qg)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
ulist_reinit(tmp);
|
|
|
|
ret = ulist_add(qgroups, qg->qgroupid, ptr_to_u64(qg),
|
|
|
|
GFP_ATOMIC);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
ret = ulist_add(tmp, qg->qgroupid, ptr_to_u64(qg), GFP_ATOMIC);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
ULIST_ITER_INIT(&tmp_uiter);
|
|
|
|
while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
|
|
|
|
struct btrfs_qgroup_list *glist;
|
|
|
|
|
|
|
|
qg = u64_to_ptr(tmp_unode->aux);
|
|
|
|
if (update_old)
|
|
|
|
btrfs_qgroup_update_old_refcnt(qg, seq, 1);
|
|
|
|
else
|
|
|
|
btrfs_qgroup_update_new_refcnt(qg, seq, 1);
|
|
|
|
list_for_each_entry(glist, &qg->groups, next_group) {
|
|
|
|
ret = ulist_add(qgroups, glist->group->qgroupid,
|
|
|
|
ptr_to_u64(glist->group),
|
|
|
|
GFP_ATOMIC);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
ret = ulist_add(tmp, glist->group->qgroupid,
|
|
|
|
ptr_to_u64(glist->group),
|
|
|
|
GFP_ATOMIC);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-04-12 08:59:57 +00:00
|
|
|
/*
|
|
|
|
* Update qgroup rfer/excl counters.
|
|
|
|
* Rfer update is easy, the code explains itself.
|
|
|
|
* Excl update is tricky, the update is split into 2 parts.
|
|
|
|
* Part 1: Possible exclusive <-> sharing detect:
|
|
|
|
* | A | !A |
|
|
|
|
* -------------------------------------
|
|
|
|
* B | * | - |
|
|
|
|
* -------------------------------------
|
|
|
|
* !B | + | ** |
|
|
|
|
* -------------------------------------
|
|
|
|
*
|
|
|
|
* Conditions:
|
|
|
|
* A: cur_old_roots < nr_old_roots (not exclusive before)
|
|
|
|
* !A: cur_old_roots == nr_old_roots (possible exclusive before)
|
|
|
|
* B: cur_new_roots < nr_new_roots (not exclusive now)
|
|
|
|
* !B: cur_new_roots == nr_new_roots (possible exclusive now)
|
|
|
|
*
|
|
|
|
* Results:
|
|
|
|
* +: Possible sharing -> exclusive -: Possible exclusive -> sharing
|
|
|
|
* *: Definitely not changed. **: Possibly unchanged.
|
|
|
|
*
|
|
|
|
* For !A and !B condition, the exception is cur_old/new_roots == 0 case.
|
|
|
|
*
|
|
|
|
* To make the logic clear, we first use condition A and B to split
|
|
|
|
* combination into 4 results.
|
|
|
|
*
|
|
|
|
* Then, for results "+" and "-", check the old/new_roots == 0 case; in those cases
|
|
|
|
* only one of the two counts can be 0.
|
|
|
|
*
|
|
|
|
* Lastly, check result **, since there are 2 variants that may be 0, split them
|
|
|
|
* again (2x2).
|
|
|
|
* But this time we don't need to consider other things, the code and logic
|
|
|
|
* are easy to understand now.
|
|
|
|
*/
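/*
 * Worked example (a sketch with hypothetical numbers): say num_bytes = 16K,
 * nr_old_roots = 2 and nr_new_roots = 1.  A qgroup with cur_old_count = 1
 * and cur_new_count = 1 was not exclusive before (1 < 2) but may be now
 * (1 == 1), so excl/excl_cmpr grow by 16K, while rfer is left alone since
 * the extent was referenced before and still is.
 */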
|
|
|
|
static int qgroup_update_counters(struct btrfs_fs_info *fs_info,
|
|
|
|
struct ulist *qgroups,
|
|
|
|
u64 nr_old_roots,
|
|
|
|
u64 nr_new_roots,
|
|
|
|
u64 num_bytes, u64 seq)
|
|
|
|
{
|
|
|
|
struct ulist_node *unode;
|
|
|
|
struct ulist_iterator uiter;
|
|
|
|
struct btrfs_qgroup *qg;
|
|
|
|
u64 cur_new_count, cur_old_count;
|
|
|
|
|
|
|
|
ULIST_ITER_INIT(&uiter);
|
|
|
|
while ((unode = ulist_next(qgroups, &uiter))) {
|
|
|
|
bool dirty = false;
|
|
|
|
|
|
|
|
qg = u64_to_ptr(unode->aux);
|
|
|
|
cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq);
|
|
|
|
cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq);
|
|
|
|
|
|
|
|
/* Rfer update part */
|
|
|
|
if (cur_old_count == 0 && cur_new_count > 0) {
|
|
|
|
qg->rfer += num_bytes;
|
|
|
|
qg->rfer_cmpr += num_bytes;
|
|
|
|
dirty = true;
|
|
|
|
}
|
|
|
|
if (cur_old_count > 0 && cur_new_count == 0) {
|
|
|
|
qg->rfer -= num_bytes;
|
|
|
|
qg->rfer_cmpr -= num_bytes;
|
|
|
|
dirty = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Excl update part */
|
|
|
|
/* Exclusive/none -> shared case */
|
|
|
|
if (cur_old_count == nr_old_roots &&
|
|
|
|
cur_new_count < nr_new_roots) {
|
|
|
|
/* Exclusive -> shared */
|
|
|
|
if (cur_old_count != 0) {
|
|
|
|
qg->excl -= num_bytes;
|
|
|
|
qg->excl_cmpr -= num_bytes;
|
|
|
|
dirty = true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Shared -> exclusive/none case */
|
|
|
|
if (cur_old_count < nr_old_roots &&
|
|
|
|
cur_new_count == nr_new_roots) {
|
|
|
|
/* Shared->exclusive */
|
|
|
|
if (cur_new_count != 0) {
|
|
|
|
qg->excl += num_bytes;
|
|
|
|
qg->excl_cmpr += num_bytes;
|
|
|
|
dirty = true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Exclusive/none -> exclusive/none case */
|
|
|
|
if (cur_old_count == nr_old_roots &&
|
|
|
|
cur_new_count == nr_new_roots) {
|
|
|
|
if (cur_old_count == 0) {
|
|
|
|
/* None -> exclusive/none */
|
|
|
|
|
|
|
|
if (cur_new_count != 0) {
|
|
|
|
/* None -> exclusive */
|
|
|
|
qg->excl += num_bytes;
|
|
|
|
qg->excl_cmpr += num_bytes;
|
|
|
|
dirty = true;
|
|
|
|
}
|
|
|
|
/* None -> none, nothing changed */
|
|
|
|
} else {
|
|
|
|
/* Exclusive -> exclusive/none */
|
|
|
|
|
|
|
|
if (cur_new_count == 0) {
|
|
|
|
/* Exclusive -> none */
|
|
|
|
qg->excl -= num_bytes;
|
|
|
|
qg->excl_cmpr -= num_bytes;
|
|
|
|
dirty = true;
|
|
|
|
}
|
|
|
|
/* Exclusive -> exclusive, nothing changed */
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (dirty)
|
|
|
|
qgroup_dirty(fs_info, qg);
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2014-05-14 00:30:47 +00:00
|
|
|
/*
|
|
|
|
* This adjusts the counters for all referenced qgroups if need be.
|
|
|
|
*/
|
|
|
|
static int qgroup_adjust_counters(struct btrfs_fs_info *fs_info,
|
|
|
|
u64 root_to_skip, u64 num_bytes,
|
|
|
|
struct ulist *qgroups, u64 seq,
|
|
|
|
int old_roots, int new_roots, int rescan)
|
2013-04-25 16:04:50 +00:00
|
|
|
{
|
|
|
|
struct ulist_node *unode;
|
|
|
|
struct ulist_iterator uiter;
|
|
|
|
struct btrfs_qgroup *qg;
|
2014-05-14 00:30:47 +00:00
|
|
|
u64 cur_new_count, cur_old_count;
|
2013-04-25 16:04:50 +00:00
|
|
|
|
|
|
|
ULIST_ITER_INIT(&uiter);
|
2014-05-14 00:30:47 +00:00
|
|
|
while ((unode = ulist_next(qgroups, &uiter))) {
|
|
|
|
bool dirty = false;
|
2013-04-25 16:04:50 +00:00
|
|
|
|
2014-05-14 00:30:47 +00:00
|
|
|
qg = u64_to_ptr(unode->aux);
|
2015-03-12 08:10:13 +00:00
|
|
|
cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq);
|
|
|
|
cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq);
|
|
|
|
|
2014-05-14 00:30:47 +00:00
|
|
|
/*
|
|
|
|
* Wasn't referenced before but is now, add to the reference
|
|
|
|
* counters.
|
|
|
|
*/
|
2015-03-12 08:10:13 +00:00
|
|
|
if (cur_old_count == 0 && cur_new_count > 0) {
|
2014-05-14 00:30:47 +00:00
|
|
|
qg->rfer += num_bytes;
|
|
|
|
qg->rfer_cmpr += num_bytes;
|
|
|
|
dirty = true;
|
|
|
|
}
|
2013-04-25 16:04:50 +00:00
|
|
|
|
2014-05-14 00:30:47 +00:00
|
|
|
/*
|
|
|
|
* Was referenced before but isn't now, subtract from the
|
|
|
|
* reference counters.
|
|
|
|
*/
|
2015-03-12 08:10:13 +00:00
|
|
|
if (cur_old_count > 0 && cur_new_count == 0) {
|
2014-05-14 00:30:47 +00:00
|
|
|
qg->rfer -= num_bytes;
|
|
|
|
qg->rfer_cmpr -= num_bytes;
|
|
|
|
dirty = true;
|
|
|
|
}
|
2013-04-25 16:04:50 +00:00
|
|
|
|
2014-05-14 00:30:47 +00:00
|
|
|
/*
|
|
|
|
* If our refcount was the same as the roots previously but our
|
|
|
|
* new count isn't the same as the number of roots now then we
|
|
|
|
* went from having an exclusive reference on this range to not.
|
|
|
|
*/
|
|
|
|
if (old_roots && cur_old_count == old_roots &&
|
|
|
|
(cur_new_count != new_roots || new_roots == 0)) {
|
|
|
|
WARN_ON(cur_new_count != new_roots && new_roots == 0);
|
|
|
|
qg->excl -= num_bytes;
|
|
|
|
qg->excl_cmpr -= num_bytes;
|
|
|
|
dirty = true;
|
|
|
|
}
|
2013-04-25 16:04:50 +00:00
|
|
|
|
2014-05-14 00:30:47 +00:00
|
|
|
/*
|
|
|
|
* If we didn't reference all the roots before but now we do, we
|
|
|
|
* have an exclusive reference to this range.
|
|
|
|
*/
|
|
|
|
if ((!old_roots || (old_roots && cur_old_count != old_roots))
|
|
|
|
&& cur_new_count == new_roots) {
|
|
|
|
qg->excl += num_bytes;
|
|
|
|
qg->excl_cmpr += num_bytes;
|
|
|
|
dirty = true;
|
2013-04-25 16:04:50 +00:00
|
|
|
}
|
|
|
|
|
2014-05-14 00:30:47 +00:00
|
|
|
if (dirty)
|
|
|
|
qgroup_dirty(fs_info, qg);
|
|
|
|
}
|
2013-04-25 16:04:50 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2012-06-28 16:03:02 +00:00
|
|
|
/*
|
2014-05-14 00:30:47 +00:00
|
|
|
* If we removed a data extent and there were other references for that bytenr
|
|
|
|
* then we need to look up all referenced roots to make sure we still don't
|
|
|
|
* reference this bytenr. If we do then we can just discard this operation.
|
2012-06-28 16:03:02 +00:00
|
|
|
*/
|
2014-05-14 00:30:47 +00:00
|
|
|
static int check_existing_refs(struct btrfs_trans_handle *trans,
|
|
|
|
struct btrfs_fs_info *fs_info,
|
|
|
|
struct btrfs_qgroup_operation *oper)
|
2012-06-28 16:03:02 +00:00
|
|
|
{
|
|
|
|
struct ulist *roots = NULL;
|
2014-05-14 00:30:47 +00:00
|
|
|
struct ulist_node *unode;
|
|
|
|
struct ulist_iterator uiter;
|
2012-06-28 16:03:02 +00:00
|
|
|
int ret = 0;
|
|
|
|
|
2014-05-14 00:30:47 +00:00
|
|
|
ret = btrfs_find_all_roots(trans, fs_info, oper->bytenr,
|
|
|
|
oper->elem.seq, &roots);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
ret = 0;
|
2012-06-28 16:03:02 +00:00
|
|
|
|
2014-05-14 00:30:47 +00:00
|
|
|
ULIST_ITER_INIT(&uiter);
|
|
|
|
while ((unode = ulist_next(roots, &uiter))) {
|
|
|
|
if (unode->val == oper->ref_root) {
|
|
|
|
ret = 1;
|
|
|
|
break;
|
|
|
|
}
|
2012-06-28 16:03:02 +00:00
|
|
|
}
|
2014-05-14 00:30:47 +00:00
|
|
|
ulist_free(roots);
|
|
|
|
btrfs_put_tree_mod_seq(fs_info, &oper->elem);
|
2012-06-28 16:03:02 +00:00
|
|
|
|
2014-05-14 00:30:47 +00:00
|
|
|
return ret;
|
|
|
|
}
|
2012-06-28 16:03:02 +00:00
|
|
|
|
2014-05-14 00:30:47 +00:00
|
|
|
/*
|
|
|
|
* If we share a reference across multiple roots then we may need to adjust
|
|
|
|
* various qgroups referenced and exclusive counters. The basic premise is this
|
|
|
|
*
|
|
|
|
* 1) We have seq to represent a 0 count. Instead of looping through all of the
|
|
|
|
* qgroups and resetting their refcount to 0 we just constantly bump this
|
|
|
|
* sequence number to act as the base reference count. This means that if
|
|
|
|
* anybody is equal to or below this sequence they were never referenced. We
|
|
|
|
* jack this sequence up by the number of roots we found each time in order to
|
|
|
|
* make sure we don't have any overlap.
|
|
|
|
*
|
|
|
|
* 2) We first search all the roots that reference the area _except_ the root
|
|
|
|
* we're acting on currently. This makes up the old_refcnt of all the qgroups
|
|
|
|
* before.
|
|
|
|
*
|
|
|
|
* 3) We walk all of the qgroups referenced by the root we are currently acting
|
|
|
|
* on, and will either adjust old_refcnt in the case of a removal or the
|
|
|
|
* new_refcnt in the case of an addition.
|
|
|
|
*
|
|
|
|
* 4) Finally we walk all the qgroups that are referenced by this range
|
|
|
|
* including the root we are acting on currently. We will adjust the counters
|
|
|
|
* based on the number of roots we had and will have after this operation.
|
|
|
|
*
|
|
|
|
* Take this example as an illustration
|
|
|
|
*
|
|
|
|
*                  [qgroup 1/0]
|
|
|
|
*              /        |        \
|
|
|
|
*       [qg 0/0]    [qg 0/1]    [qg 0/2]
|
|
|
|
*              \        |        /
|
|
|
|
*                  [ extent ]
|
|
|
|
*
|
|
|
|
* Say we are adding a reference that is covered by qg 0/0. The first step
|
|
|
|
* would give a refcnt of 1 to qg 0/1 and 0/2 and a refcnt of 2 to qg 1/0 with
|
|
|
|
* old_roots being 2. Because it is adding new_roots will be 1. We then go
|
|
|
|
* through qg 0/0 which will get the new_refcnt set to 1 and add 1 to qg 1/0's
|
|
|
|
* new_refcnt, bringing it to 3. We then walk through all of the qgroups, we
|
|
|
|
* notice that the old refcnt for qg 0/0 < the new refcnt, so we added a
|
|
|
|
* reference and thus must add the size to the referenced bytes. Everything
|
|
|
|
* else is the same so nothing else changes.
|
|
|
|
*/
|
|
|
|
static int qgroup_shared_accounting(struct btrfs_trans_handle *trans,
|
|
|
|
struct btrfs_fs_info *fs_info,
|
|
|
|
struct btrfs_qgroup_operation *oper)
|
|
|
|
{
|
|
|
|
struct ulist *roots = NULL;
|
|
|
|
struct ulist *qgroups, *tmp;
|
|
|
|
struct btrfs_qgroup *qgroup;
|
2015-02-25 14:47:32 +00:00
|
|
|
struct seq_list elem = SEQ_LIST_INIT(elem);
|
2014-05-14 00:30:47 +00:00
|
|
|
u64 seq;
|
|
|
|
int old_roots = 0;
|
|
|
|
int new_roots = 0;
|
|
|
|
int ret = 0;
|
2012-06-28 16:03:02 +00:00
|
|
|
|
2014-05-14 00:30:47 +00:00
|
|
|
if (oper->elem.seq) {
|
|
|
|
ret = check_existing_refs(trans, fs_info, oper);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
if (ret)
|
2013-04-25 16:04:51 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2014-05-14 00:30:47 +00:00
|
|
|
qgroups = ulist_alloc(GFP_NOFS);
|
|
|
|
if (!qgroups)
|
|
|
|
return -ENOMEM;
|
2013-04-25 16:04:51 +00:00
|
|
|
|
2014-05-14 00:30:47 +00:00
|
|
|
tmp = ulist_alloc(GFP_NOFS);
|
2014-06-12 05:14:59 +00:00
|
|
|
if (!tmp) {
|
|
|
|
ulist_free(qgroups);
|
2014-05-14 00:30:47 +00:00
|
|
|
return -ENOMEM;
|
2014-06-12 05:14:59 +00:00
|
|
|
}
|
2012-06-28 16:03:02 +00:00
|
|
|
|
2014-05-14 00:30:47 +00:00
|
|
|
btrfs_get_tree_mod_seq(fs_info, &elem);
|
|
|
|
ret = btrfs_find_all_roots(trans, fs_info, oper->bytenr, elem.seq,
|
|
|
|
&roots);
|
|
|
|
btrfs_put_tree_mod_seq(fs_info, &elem);
|
|
|
|
if (ret < 0) {
|
|
|
|
ulist_free(qgroups);
|
|
|
|
ulist_free(tmp);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
spin_lock(&fs_info->qgroup_lock);
|
|
|
|
qgroup = find_qgroup_rb(fs_info, oper->ref_root);
|
2012-06-28 16:03:02 +00:00
|
|
|
if (!qgroup)
|
2014-05-14 00:30:47 +00:00
|
|
|
goto out;
|
|
|
|
seq = fs_info->qgroup_seq;
|
2012-06-28 16:03:02 +00:00
|
|
|
|
|
|
|
/*
|
2014-05-14 00:30:47 +00:00
|
|
|
* So roots is the list of all the roots currently pointing at the
|
|
|
|
* bytenr, including the ref we are adding if we are adding, or not if
|
|
|
|
* we are removing a ref. So we pass in the ref_root to skip that root
|
|
|
|
* in our calculations. We set old_refcnt and new_refcnt because who the
|
|
|
|
* hell knows what everything looked like before, and it doesn't matter
|
|
|
|
* except...
|
2012-06-28 16:03:02 +00:00
|
|
|
*/
|
2014-05-14 00:30:47 +00:00
|
|
|
ret = qgroup_calc_old_refcnt(fs_info, oper->ref_root, tmp, roots, qgroups,
|
|
|
|
seq, &old_roots, 0);
|
|
|
|
if (ret < 0)
|
|
|
|
goto out;
|
2012-06-28 16:03:02 +00:00
|
|
|
|
2014-05-14 00:30:47 +00:00
|
|
|
/*
|
|
|
|
* Now adjust the refcounts of the qgroups that care about this
|
|
|
|
* reference, either the old_count in the case of removal or new_count
|
|
|
|
* in the case of an addition.
|
|
|
|
*/
|
|
|
|
ret = qgroup_calc_new_refcnt(fs_info, oper, qgroup, tmp, qgroups,
|
|
|
|
seq);
|
|
|
|
if (ret < 0)
|
|
|
|
goto out;
|
2012-06-28 16:03:02 +00:00
|
|
|
|
|
|
|
/*
|
2014-05-14 00:30:47 +00:00
|
|
|
* ...in the case of removals. If we had a removal before we got around
|
|
|
|
* to processing this operation then we need to find that guy and count
|
|
|
|
* his references as if they really existed so we don't end up screwing
|
|
|
|
* up the exclusive counts. Then whenever we go to process the delete
|
|
|
|
* everything will be grand and we can account for whatever exclusive
|
|
|
|
* changes need to be made there. We also have to pass in old_roots so
|
|
|
|
* we have an accurate count of the roots as it pertains to this
|
|
|
|
* operations view of the world.
|
2012-06-28 16:03:02 +00:00
|
|
|
*/
|
2014-05-14 00:30:47 +00:00
|
|
|
ret = qgroup_account_deleted_refs(fs_info, oper, tmp, qgroups, seq,
|
|
|
|
&old_roots);
|
|
|
|
if (ret < 0)
|
|
|
|
goto out;
|
2012-06-28 16:03:02 +00:00
|
|
|
|
|
|
|
/*
|
2014-05-14 00:30:47 +00:00
|
|
|
* We are adding our root, need to adjust up the number of roots,
|
|
|
|
* otherwise old_roots is the number of roots we want.
|
2012-06-28 16:03:02 +00:00
|
|
|
*/
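/*
 * qgroup_calc_old_refcnt() skipped oper->ref_root, so old_roots does not yet
 * include us.  For an addition we were not a root before, hence
 * new_roots = old_roots + 1; for a removal we were, so old_roots gets bumped
 * and new_roots stays at the count without us.
 */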
|
2014-05-14 00:30:47 +00:00
|
|
|
if (oper->type == BTRFS_QGROUP_OPER_ADD_SHARED) {
|
|
|
|
new_roots = old_roots + 1;
|
|
|
|
} else {
|
|
|
|
new_roots = old_roots;
|
|
|
|
old_roots++;
|
|
|
|
}
|
2015-03-12 08:10:13 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Bump qgroup_seq to avoid seq overlap
|
|
|
|
* XXX: This makes qgroup_seq mismatch with oper->seq.
|
|
|
|
*/
|
2014-05-14 00:30:47 +00:00
|
|
|
fs_info->qgroup_seq += old_roots + 1;
|
2012-06-28 16:03:02 +00:00
|
|
|
|
2014-05-14 00:30:47 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* And now the magic happens, bless Arne for having a pretty elegant
|
|
|
|
* solution for this.
|
|
|
|
*/
|
|
|
|
qgroup_adjust_counters(fs_info, oper->ref_root, oper->num_bytes,
|
|
|
|
qgroups, seq, old_roots, new_roots, 0);
|
|
|
|
out:
|
2012-06-28 16:03:02 +00:00
|
|
|
spin_unlock(&fs_info->qgroup_lock);
|
2014-05-14 00:30:47 +00:00
|
|
|
ulist_free(qgroups);
|
2012-06-28 16:03:02 +00:00
|
|
|
ulist_free(roots);
|
2014-05-14 00:30:47 +00:00
|
|
|
ulist_free(tmp);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2014-07-17 19:39:01 +00:00
|
|
|
/*
|
|
|
|
* Process a reference to a shared subtree. This type of operation is
|
|
|
|
* queued during snapshot removal when we encounter extents which are
|
|
|
|
* shared between more than one root.
|
|
|
|
*/
|
|
|
|
static int qgroup_subtree_accounting(struct btrfs_trans_handle *trans,
|
|
|
|
struct btrfs_fs_info *fs_info,
|
|
|
|
struct btrfs_qgroup_operation *oper)
|
|
|
|
{
|
|
|
|
struct ulist *roots = NULL;
|
|
|
|
struct ulist_node *unode;
|
|
|
|
struct ulist_iterator uiter;
|
|
|
|
struct btrfs_qgroup_list *glist;
|
|
|
|
struct ulist *parents;
|
|
|
|
int ret = 0;
|
2014-07-17 19:39:04 +00:00
|
|
|
int err;
|
2014-07-17 19:39:01 +00:00
|
|
|
struct btrfs_qgroup *qg;
|
|
|
|
u64 root_obj = 0;
|
2015-02-25 14:47:32 +00:00
|
|
|
struct seq_list elem = SEQ_LIST_INIT(elem);
|
2014-07-17 19:39:01 +00:00
|
|
|
|
|
|
|
parents = ulist_alloc(GFP_NOFS);
|
|
|
|
if (!parents)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
btrfs_get_tree_mod_seq(fs_info, &elem);
|
|
|
|
ret = btrfs_find_all_roots(trans, fs_info, oper->bytenr,
|
|
|
|
elem.seq, &roots);
|
|
|
|
btrfs_put_tree_mod_seq(fs_info, &elem);
|
|
|
|
if (ret < 0)
|
2014-08-17 20:09:21 +00:00
|
|
|
goto out;
|
2014-07-17 19:39:01 +00:00
|
|
|
|
|
|
|
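/*
 * Only the case where exactly one root is left pointing at this extent is
 * handled here; that root becomes the sole (exclusive) owner of the bytes.
 */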
if (roots->nnodes != 1)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
ULIST_ITER_INIT(&uiter);
|
|
|
|
unode = ulist_next(roots, &uiter); /* Only want 1 so no need to loop */
|
|
|
|
/*
|
|
|
|
* If we find our ref root then that means all refs
|
|
|
|
* this extent has to the root have not yet been
|
|
|
|
* deleted. In that case, we do nothing and let the
|
|
|
|
* last ref for this bytenr drive our update.
|
|
|
|
*
|
|
|
|
* This can happen for example if an extent is
|
|
|
|
* referenced multiple times in a snapshot (clone,
|
|
|
|
* etc). If we are in the middle of snapshot removal,
|
|
|
|
* queued updates for such an extent will find the
|
|
|
|
* root if we have not yet finished removing the
|
|
|
|
* snapshot.
|
|
|
|
*/
|
|
|
|
if (unode->val == oper->ref_root)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
root_obj = unode->val;
|
|
|
|
BUG_ON(!root_obj);
|
|
|
|
|
|
|
|
spin_lock(&fs_info->qgroup_lock);
|
|
|
|
qg = find_qgroup_rb(fs_info, root_obj);
|
|
|
|
if (!qg)
|
|
|
|
goto out_unlock;
|
|
|
|
|
|
|
|
qg->excl += oper->num_bytes;
|
|
|
|
qg->excl_cmpr += oper->num_bytes;
|
|
|
|
qgroup_dirty(fs_info, qg);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Adjust counts for parent groups. First we find all
|
|
|
|
* parents, then in the 2nd loop we do the adjustment
|
|
|
|
* while adding parents of the parents to our ulist.
|
|
|
|
*/
|
|
|
|
list_for_each_entry(glist, &qg->groups, next_group) {
|
2014-07-17 19:39:04 +00:00
|
|
|
err = ulist_add(parents, glist->group->qgroupid,
|
2014-07-17 19:39:01 +00:00
|
|
|
ptr_to_u64(glist->group), GFP_ATOMIC);
|
2014-07-17 19:39:04 +00:00
|
|
|
if (err < 0) {
|
|
|
|
ret = err;
|
2014-07-17 19:39:01 +00:00
|
|
|
goto out_unlock;
|
2014-07-17 19:39:04 +00:00
|
|
|
}
|
2014-07-17 19:39:01 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
ULIST_ITER_INIT(&uiter);
|
|
|
|
while ((unode = ulist_next(parents, &uiter))) {
|
|
|
|
qg = u64_to_ptr(unode->aux);
|
|
|
|
qg->excl += oper->num_bytes;
|
|
|
|
qg->excl_cmpr += oper->num_bytes;
|
|
|
|
qgroup_dirty(fs_info, qg);
|
|
|
|
|
|
|
|
/* Add any parents of the parents */
|
|
|
|
list_for_each_entry(glist, &qg->groups, next_group) {
|
2014-07-17 19:39:04 +00:00
|
|
|
err = ulist_add(parents, glist->group->qgroupid,
|
2014-07-17 19:39:01 +00:00
|
|
|
ptr_to_u64(glist->group), GFP_ATOMIC);
|
2014-07-17 19:39:04 +00:00
|
|
|
if (err < 0) {
|
|
|
|
ret = err;
|
2014-07-17 19:39:01 +00:00
|
|
|
goto out_unlock;
|
2014-07-17 19:39:04 +00:00
|
|
|
}
|
2014-07-17 19:39:01 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
out_unlock:
|
|
|
|
spin_unlock(&fs_info->qgroup_lock);
|
|
|
|
|
|
|
|
out:
|
|
|
|
ulist_free(roots);
|
|
|
|
ulist_free(parents);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2014-05-14 00:30:47 +00:00
|
|
|
/*
|
|
|
|
* btrfs_qgroup_account is called for every ref that is added to or deleted
|
|
|
|
* from the fs. First, all roots referencing the extent are searched, and
|
|
|
|
* then the space is accounted accordingly to the different roots. The
|
|
|
|
* accounting algorithm works in 3 steps documented inline.
|
|
|
|
*/
|
|
|
|
static int btrfs_qgroup_account(struct btrfs_trans_handle *trans,
|
|
|
|
struct btrfs_fs_info *fs_info,
|
|
|
|
struct btrfs_qgroup_operation *oper)
|
|
|
|
{
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
if (!fs_info->quota_enabled)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
BUG_ON(!fs_info->quota_root);
|
|
|
|
|
|
|
|
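/*
 * If a rescan is running and its cursor has not passed this bytenr yet,
 * skip the operation here; the rescan worker will account the extent when
 * it reaches it.
 */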
mutex_lock(&fs_info->qgroup_rescan_lock);
|
|
|
|
if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
|
|
|
|
if (fs_info->qgroup_rescan_progress.objectid <= oper->bytenr) {
|
|
|
|
mutex_unlock(&fs_info->qgroup_rescan_lock);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
mutex_unlock(&fs_info->qgroup_rescan_lock);
|
|
|
|
|
|
|
|
ASSERT(is_fstree(oper->ref_root));
|
|
|
|
|
2014-07-17 19:39:00 +00:00
|
|
|
trace_btrfs_qgroup_account(oper);
|
|
|
|
|
2014-05-14 00:30:47 +00:00
|
|
|
switch (oper->type) {
|
|
|
|
case BTRFS_QGROUP_OPER_ADD_EXCL:
|
|
|
|
case BTRFS_QGROUP_OPER_SUB_EXCL:
|
|
|
|
ret = qgroup_excl_accounting(fs_info, oper);
|
|
|
|
break;
|
|
|
|
case BTRFS_QGROUP_OPER_ADD_SHARED:
|
|
|
|
case BTRFS_QGROUP_OPER_SUB_SHARED:
|
|
|
|
ret = qgroup_shared_accounting(trans, fs_info, oper);
|
|
|
|
break;
|
2014-07-17 19:39:01 +00:00
|
|
|
case BTRFS_QGROUP_OPER_SUB_SUBTREE:
|
|
|
|
ret = qgroup_subtree_accounting(trans, fs_info, oper);
|
|
|
|
break;
|
2014-05-14 00:30:47 +00:00
|
|
|
default:
|
|
|
|
ASSERT(0);
|
|
|
|
}
|
|
|
|
return ret;
|
|
|
|
}
|
2012-06-28 16:03:02 +00:00
|
|
|
|
2014-05-14 00:30:47 +00:00
|
|
|
/*
|
|
|
|
* Needs to be called every time we run delayed refs, even if there is an error
|
|
|
|
* in order to clean up outstanding operations.
|
|
|
|
*/
|
|
|
|
int btrfs_delayed_qgroup_accounting(struct btrfs_trans_handle *trans,
|
|
|
|
struct btrfs_fs_info *fs_info)
|
|
|
|
{
|
|
|
|
struct btrfs_qgroup_operation *oper;
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
while (!list_empty(&trans->qgroup_ref_list)) {
|
|
|
|
oper = list_first_entry(&trans->qgroup_ref_list,
|
|
|
|
struct btrfs_qgroup_operation, list);
|
|
|
|
list_del_init(&oper->list);
|
|
|
|
if (!ret || !trans->aborted)
|
|
|
|
ret = btrfs_qgroup_account(trans, fs_info, oper);
|
|
|
|
spin_lock(&fs_info->qgroup_op_lock);
|
|
|
|
rb_erase(&oper->n, &fs_info->qgroup_op_tree);
|
|
|
|
spin_unlock(&fs_info->qgroup_op_lock);
|
|
|
|
btrfs_put_tree_mod_seq(fs_info, &oper->elem);
|
|
|
|
kfree(oper);
|
|
|
|
}
|
2012-06-28 16:03:02 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* called from commit_transaction. Writes all changed qgroups to disk.
|
|
|
|
*/
|
|
|
|
int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
|
|
|
|
struct btrfs_fs_info *fs_info)
|
|
|
|
{
|
|
|
|
struct btrfs_root *quota_root = fs_info->quota_root;
|
|
|
|
int ret = 0;
|
2013-04-25 16:04:52 +00:00
|
|
|
int start_rescan_worker = 0;
|
2012-06-28 16:03:02 +00:00
|
|
|
|
|
|
|
if (!quota_root)
|
|
|
|
goto out;
|
|
|
|
|
2013-04-25 16:04:52 +00:00
|
|
|
if (!fs_info->quota_enabled && fs_info->pending_quota_state)
|
|
|
|
start_rescan_worker = 1;
|
|
|
|
|
2012-06-28 16:03:02 +00:00
|
|
|
fs_info->quota_enabled = fs_info->pending_quota_state;
|
|
|
|
|
|
|
|
spin_lock(&fs_info->qgroup_lock);
|
|
|
|
while (!list_empty(&fs_info->dirty_qgroups)) {
|
|
|
|
struct btrfs_qgroup *qgroup;
|
|
|
|
qgroup = list_first_entry(&fs_info->dirty_qgroups,
|
|
|
|
struct btrfs_qgroup, dirty);
|
|
|
|
list_del_init(&qgroup->dirty);
|
|
|
|
spin_unlock(&fs_info->qgroup_lock);
|
|
|
|
ret = update_qgroup_info_item(trans, quota_root, qgroup);
|
2014-11-21 02:04:56 +00:00
|
|
|
if (ret)
|
|
|
|
fs_info->qgroup_flags |=
|
|
|
|
BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
|
|
|
|
ret = update_qgroup_limit_item(trans, quota_root, qgroup);
|
2012-06-28 16:03:02 +00:00
|
|
|
if (ret)
|
|
|
|
fs_info->qgroup_flags |=
|
|
|
|
BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
|
|
|
|
spin_lock(&fs_info->qgroup_lock);
|
|
|
|
}
|
|
|
|
if (fs_info->quota_enabled)
|
|
|
|
fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
|
|
|
|
else
|
|
|
|
fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
|
|
|
|
spin_unlock(&fs_info->qgroup_lock);
|
|
|
|
|
|
|
|
ret = update_qgroup_status_item(trans, fs_info, quota_root);
|
|
|
|
if (ret)
|
|
|
|
fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
|
|
|
|
|
2013-04-25 16:04:52 +00:00
|
|
|
if (!ret && start_rescan_worker) {
|
Btrfs: fix qgroup rescan resume on mount
When called during mount, we cannot start the rescan worker thread until
open_ctree is done. This commit restructures the qgroup rescan internals to
enable a clean deferral of the rescan resume operation.
First of all, the struct qgroup_rescan is removed, saving us a malloc and
some initialization synchronization problems. Its only element (the worker
struct) now lives within fs_info just as the rest of the rescan code.
Then setting up a rescan worker is split into several reusable stages.
Currently we have three different rescan startup scenarios:
(A) rescan ioctl
(B) rescan resume by mount
(C) rescan by quota enable
Each case needs its own combination of the four following steps:
(1) set the progress [A, C: zero; B: state of umount]
(2) commit the transaction [A]
(3) set the counters [A, C: zero; B: state of umount]
(4) start worker [A, B, C]
qgroup_rescan_init does step (1). There's no extra function added to commit
a transaction, we've got that already. qgroup_rescan_zero_tracking does
step (3). Step (4) is nothing more than a call to the generic
btrfs_queue_worker.
We also get rid of a double check for the rescan progress during
btrfs_qgroup_account_ref, which is no longer required due to having step 2
from the list above.
As a side effect, this commit prepares to move the rescan start code from
btrfs_run_qgroups (which is run during commit) to a less time critical
section.
Signed-off-by: Jan Schmidt <list.btrfs@jan-o-sch.net>
Signed-off-by: Josef Bacik <jbacik@fusionio.com>
2013-05-28 15:47:24 +00:00
|
|
|
ret = qgroup_rescan_init(fs_info, 0, 1);
|
|
|
|
if (!ret) {
|
|
|
|
qgroup_rescan_zero_tracking(fs_info);
|
2014-02-28 02:46:16 +00:00
|
|
|
btrfs_queue_work(fs_info->qgroup_rescan_workers,
|
|
|
|
&fs_info->qgroup_rescan_work);
|
2013-05-28 15:47:24 +00:00
|
|
|
}
|
2013-04-25 16:04:52 +00:00
|
|
|
ret = 0;
|
|
|
|
}
|
|
|
|
|
2012-06-28 16:03:02 +00:00
|
|
|
out:
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* copy the accounting information between qgroups. This is necessary when a
|
|
|
|
* snapshot or a subvolume is created
|
|
|
|
*/
|
|
|
|
int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
|
|
|
|
struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid,
|
|
|
|
struct btrfs_qgroup_inherit *inherit)
|
|
|
|
{
|
|
|
|
int ret = 0;
|
|
|
|
int i;
|
|
|
|
u64 *i_qgroups;
|
|
|
|
struct btrfs_root *quota_root = fs_info->quota_root;
|
|
|
|
struct btrfs_qgroup *srcgroup;
|
|
|
|
struct btrfs_qgroup *dstgroup;
|
|
|
|
u32 level_size = 0;
|
2013-04-07 10:50:19 +00:00
|
|
|
u64 nums;
|
2012-06-28 16:03:02 +00:00
|
|
|
|
2013-04-07 10:50:16 +00:00
|
|
|
mutex_lock(&fs_info->qgroup_ioctl_lock);
|
2012-06-28 16:03:02 +00:00
|
|
|
if (!fs_info->quota_enabled)
|
2013-04-07 10:50:16 +00:00
|
|
|
goto out;
|
2012-06-28 16:03:02 +00:00
|
|
|
|
2013-04-07 10:50:16 +00:00
|
|
|
if (!quota_root) {
|
|
|
|
ret = -EINVAL;
|
|
|
|
goto out;
|
|
|
|
}
|
2012-06-28 16:03:02 +00:00
|
|
|
|
2013-04-07 10:50:19 +00:00
|
|
|
if (inherit) {
|
|
|
|
i_qgroups = (u64 *)(inherit + 1);
|
|
|
|
nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
|
|
|
|
2 * inherit->num_excl_copies;
|
|
|
|
for (i = 0; i < nums; ++i) {
|
|
|
|
srcgroup = find_qgroup_rb(fs_info, *i_qgroups);
|
|
|
|
if (!srcgroup) {
|
|
|
|
ret = -EINVAL;
|
|
|
|
goto out;
|
|
|
|
}
|
2014-11-11 12:18:22 +00:00
|
|
|
|
|
|
|
if ((srcgroup->qgroupid >> 48) <= (objectid >> 48)) {
|
|
|
|
ret = -EINVAL;
|
|
|
|
goto out;
|
|
|
|
}
|
2013-04-07 10:50:19 +00:00
|
|
|
++i_qgroups;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-06-28 16:03:02 +00:00
|
|
|
/*
|
|
|
|
* create a tracking group for the subvol itself
|
|
|
|
*/
|
|
|
|
ret = add_qgroup_item(trans, quota_root, objectid);
|
|
|
|
if (ret)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
if (srcid) {
|
|
|
|
struct btrfs_root *srcroot;
|
|
|
|
struct btrfs_key srckey;
|
|
|
|
|
|
|
|
srckey.objectid = srcid;
|
|
|
|
srckey.type = BTRFS_ROOT_ITEM_KEY;
|
|
|
|
srckey.offset = (u64)-1;
|
|
|
|
srcroot = btrfs_read_fs_root_no_name(fs_info, &srckey);
|
|
|
|
if (IS_ERR(srcroot)) {
|
|
|
|
ret = PTR_ERR(srcroot);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
rcu_read_lock();
|
2014-06-04 17:22:26 +00:00
|
|
|
level_size = srcroot->nodesize;
|
2012-06-28 16:03:02 +00:00
|
|
|
rcu_read_unlock();
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* add qgroup to all inherited groups
|
|
|
|
*/
|
|
|
|
if (inherit) {
|
|
|
|
i_qgroups = (u64 *)(inherit + 1);
|
|
|
|
for (i = 0; i < inherit->num_qgroups; ++i) {
|
|
|
|
ret = add_qgroup_relation_item(trans, quota_root,
|
|
|
|
objectid, *i_qgroups);
|
|
|
|
if (ret)
|
|
|
|
goto out;
|
|
|
|
ret = add_qgroup_relation_item(trans, quota_root,
|
|
|
|
*i_qgroups, objectid);
|
|
|
|
if (ret)
|
|
|
|
goto out;
|
|
|
|
++i_qgroups;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
spin_lock(&fs_info->qgroup_lock);
|
|
|
|
|
|
|
|
dstgroup = add_qgroup_rb(fs_info, objectid);
|
2012-07-30 08:15:43 +00:00
|
|
|
if (IS_ERR(dstgroup)) {
|
|
|
|
ret = PTR_ERR(dstgroup);
|
2012-06-28 16:03:02 +00:00
|
|
|
goto unlock;
|
2012-07-30 08:15:43 +00:00
|
|
|
}
|
2012-06-28 16:03:02 +00:00
|
|
|
|
2014-11-21 01:58:34 +00:00
|
|
|
if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
|
|
|
|
dstgroup->lim_flags = inherit->lim.flags;
|
|
|
|
dstgroup->max_rfer = inherit->lim.max_rfer;
|
|
|
|
dstgroup->max_excl = inherit->lim.max_excl;
|
|
|
|
dstgroup->rsv_rfer = inherit->lim.rsv_rfer;
|
|
|
|
dstgroup->rsv_excl = inherit->lim.rsv_excl;
|
2014-11-21 02:01:41 +00:00
|
|
|
|
|
|
|
ret = update_qgroup_limit_item(trans, quota_root, dstgroup);
|
|
|
|
if (ret) {
|
|
|
|
fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
|
|
|
|
btrfs_info(fs_info, "unable to update quota limit for %llu",
|
|
|
|
dstgroup->qgroupid);
|
|
|
|
goto unlock;
|
|
|
|
}
|
2014-11-21 01:58:34 +00:00
|
|
|
}
|
|
|
|
|
2012-06-28 16:03:02 +00:00
|
|
|
if (srcid) {
|
|
|
|
srcgroup = find_qgroup_rb(fs_info, srcid);
|
2012-09-15 00:06:30 +00:00
|
|
|
if (!srcgroup)
|
2012-06-28 16:03:02 +00:00
|
|
|
goto unlock;
|
2014-05-14 00:30:47 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* We call inherit after we clone the root in order to make sure
|
|
|
|
* our counts don't go crazy, so at this point the only
|
|
|
|
* difference between the two roots should be the root node.
|
|
|
|
*/
|
|
|
|
dstgroup->rfer = srcgroup->rfer;
|
|
|
|
dstgroup->rfer_cmpr = srcgroup->rfer_cmpr;
|
|
|
|
dstgroup->excl = level_size;
|
|
|
|
dstgroup->excl_cmpr = level_size;
|
2012-06-28 16:03:02 +00:00
|
|
|
srcgroup->excl = level_size;
|
|
|
|
srcgroup->excl_cmpr = level_size;
|
2014-11-21 01:14:38 +00:00
|
|
|
|
|
|
|
/* inherit the limit info */
|
|
|
|
dstgroup->lim_flags = srcgroup->lim_flags;
|
|
|
|
dstgroup->max_rfer = srcgroup->max_rfer;
|
|
|
|
dstgroup->max_excl = srcgroup->max_excl;
|
|
|
|
dstgroup->rsv_rfer = srcgroup->rsv_rfer;
|
|
|
|
dstgroup->rsv_excl = srcgroup->rsv_excl;
|
|
|
|
|
2012-06-28 16:03:02 +00:00
|
|
|
qgroup_dirty(fs_info, dstgroup);
|
|
|
|
qgroup_dirty(fs_info, srcgroup);
|
|
|
|
}
|
|
|
|
|
2012-09-15 00:06:30 +00:00
|
|
|
if (!inherit)
|
2012-06-28 16:03:02 +00:00
|
|
|
goto unlock;
|
|
|
|
|
|
|
|
i_qgroups = (u64 *)(inherit + 1);
|
|
|
|
for (i = 0; i < inherit->num_qgroups; ++i) {
|
|
|
|
ret = add_relation_rb(quota_root->fs_info, objectid,
|
|
|
|
*i_qgroups);
|
|
|
|
if (ret)
|
|
|
|
goto unlock;
|
|
|
|
++i_qgroups;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < inherit->num_ref_copies; ++i) {
|
|
|
|
struct btrfs_qgroup *src;
|
|
|
|
struct btrfs_qgroup *dst;
|
|
|
|
|
|
|
|
src = find_qgroup_rb(fs_info, i_qgroups[0]);
|
|
|
|
dst = find_qgroup_rb(fs_info, i_qgroups[1]);
|
|
|
|
|
|
|
|
if (!src || !dst) {
|
|
|
|
ret = -EINVAL;
|
|
|
|
goto unlock;
|
|
|
|
}
|
|
|
|
|
|
|
|
dst->rfer = src->rfer - level_size;
|
|
|
|
dst->rfer_cmpr = src->rfer_cmpr - level_size;
|
|
|
|
i_qgroups += 2;
|
|
|
|
}
|
|
|
|
for (i = 0; i < inherit->num_excl_copies; ++i) {
|
|
|
|
struct btrfs_qgroup *src;
|
|
|
|
struct btrfs_qgroup *dst;
|
|
|
|
|
|
|
|
src = find_qgroup_rb(fs_info, i_qgroups[0]);
|
|
|
|
dst = find_qgroup_rb(fs_info, i_qgroups[1]);
|
|
|
|
|
|
|
|
if (!src || !dst) {
|
|
|
|
ret = -EINVAL;
|
|
|
|
goto unlock;
|
|
|
|
}
|
|
|
|
|
|
|
|
dst->excl = src->excl + level_size;
|
|
|
|
dst->excl_cmpr = src->excl_cmpr + level_size;
|
|
|
|
i_qgroups += 2;
|
|
|
|
}
|
|
|
|
|
|
|
|
unlock:
|
|
|
|
spin_unlock(&fs_info->qgroup_lock);
|
|
|
|
out:
|
2013-04-07 10:50:16 +00:00
|
|
|
mutex_unlock(&fs_info->qgroup_ioctl_lock);
|
2012-06-28 16:03:02 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
|
|
|
|
{
|
|
|
|
struct btrfs_root *quota_root;
|
|
|
|
struct btrfs_qgroup *qgroup;
|
|
|
|
struct btrfs_fs_info *fs_info = root->fs_info;
|
|
|
|
u64 ref_root = root->root_key.objectid;
|
|
|
|
int ret = 0;
|
|
|
|
struct ulist_node *unode;
|
|
|
|
struct ulist_iterator uiter;
|
|
|
|
|
|
|
|
if (!is_fstree(ref_root))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (num_bytes == 0)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
spin_lock(&fs_info->qgroup_lock);
|
|
|
|
quota_root = fs_info->quota_root;
|
|
|
|
if (!quota_root)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
qgroup = find_qgroup_rb(fs_info, ref_root);
|
|
|
|
if (!qgroup)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* in a first step, we check all affected qgroups if any limits would
|
|
|
|
* be exceeded
|
|
|
|
*/
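/*
 * Note that the walk below also pulls in every ancestor of this subvolume's
 * qgroup, since a reservation counts against the limits of all parent
 * groups as well.
 */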
|
2013-05-06 11:03:27 +00:00
|
|
|
ulist_reinit(fs_info->qgroup_ulist);
|
|
|
|
ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
|
2013-04-17 14:00:36 +00:00
|
|
|
(uintptr_t)qgroup, GFP_ATOMIC);
|
|
|
|
if (ret < 0)
|
|
|
|
goto out;
|
2012-06-28 16:03:02 +00:00
|
|
|
ULIST_ITER_INIT(&uiter);
|
2013-05-06 11:03:27 +00:00
|
|
|
while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
|
2012-06-28 16:03:02 +00:00
|
|
|
struct btrfs_qgroup *qg;
|
|
|
|
struct btrfs_qgroup_list *glist;
|
|
|
|
|
2014-05-14 00:30:47 +00:00
|
|
|
qg = u64_to_ptr(unode->aux);
|
2012-06-28 16:03:02 +00:00
|
|
|
|
|
|
|
if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
|
2015-02-06 15:26:52 +00:00
|
|
|
qg->reserved + (s64)qg->rfer + num_bytes >
|
2013-03-06 11:51:47 +00:00
|
|
|
qg->max_rfer) {
|
2012-06-28 16:03:02 +00:00
|
|
|
ret = -EDQUOT;
|
2013-03-06 11:51:47 +00:00
|
|
|
goto out;
|
|
|
|
}
|
2012-06-28 16:03:02 +00:00
|
|
|
|
|
|
|
if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
|
2015-02-06 15:26:52 +00:00
|
|
|
qg->reserved + (s64)qg->excl + num_bytes >
|
2013-03-06 11:51:47 +00:00
|
|
|
qg->max_excl) {
|
2012-06-28 16:03:02 +00:00
|
|
|
ret = -EDQUOT;
|
2013-03-06 11:51:47 +00:00
|
|
|
goto out;
|
|
|
|
}
|
2012-06-28 16:03:02 +00:00
|
|
|
|
|
|
|
list_for_each_entry(glist, &qg->groups, next_group) {
|
2013-05-06 11:03:27 +00:00
|
|
|
ret = ulist_add(fs_info->qgroup_ulist,
|
|
|
|
glist->group->qgroupid,
|
2013-04-17 14:00:36 +00:00
|
|
|
(uintptr_t)glist->group, GFP_ATOMIC);
|
|
|
|
if (ret < 0)
|
|
|
|
goto out;
|
2012-06-28 16:03:02 +00:00
|
|
|
}
|
|
|
|
}
|
2013-04-17 14:00:36 +00:00
|
|
|
ret = 0;
|
2012-06-28 16:03:02 +00:00
|
|
|
/*
|
|
|
|
* no limits exceeded, now record the reservation into all qgroups
|
|
|
|
*/
|
|
|
|
ULIST_ITER_INIT(&uiter);
|
2013-05-06 11:03:27 +00:00
|
|
|
while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
|
2012-06-28 16:03:02 +00:00
|
|
|
struct btrfs_qgroup *qg;
|
|
|
|
|
2014-05-14 00:30:47 +00:00
|
|
|
qg = u64_to_ptr(unode->aux);
|
2012-06-28 16:03:02 +00:00
|
|
|
|
2015-02-06 15:26:52 +00:00
|
|
|
qg->reserved += num_bytes;
|
2012-06-28 16:03:02 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
out:
|
|
|
|
spin_unlock(&fs_info->qgroup_lock);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
void btrfs_qgroup_free(struct btrfs_root *root, u64 num_bytes)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	u64 ref_root = root->root_key.objectid;
	int ret = 0;

	if (!is_fstree(ref_root))
		return;

	if (num_bytes == 0)
		return;

	spin_lock(&fs_info->qgroup_lock);

	quota_root = fs_info->quota_root;
	if (!quota_root)
		goto out;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	ulist_reinit(fs_info->qgroup_ulist);
	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
			(uintptr_t)qgroup, GFP_ATOMIC);
	if (ret < 0)
		goto out;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;
		struct btrfs_qgroup_list *glist;

		qg = u64_to_ptr(unode->aux);

		qg->reserved -= num_bytes;

		list_for_each_entry(glist, &qg->groups, next_group) {
			ret = ulist_add(fs_info->qgroup_ulist,
					glist->group->qgroupid,
					(uintptr_t)glist->group, GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}

out:
	spin_unlock(&fs_info->qgroup_lock);
}
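
/*
 * Sanity check used at transaction commit time: if the transaction still
 * has entries on its qgroup_ref_list or still holds a delayed ref
 * sequence element, qgroup accounting was not brought up to date, so the
 * details are logged and we BUG().
 */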
void assert_qgroups_uptodate(struct btrfs_trans_handle *trans)
{
	if (list_empty(&trans->qgroup_ref_list) && !trans->delayed_ref_elem.seq)
		return;
	btrfs_err(trans->root->fs_info,
		"qgroups not uptodate in trans handle %p: list is%s empty, "
		"seq is %#x.%x",
		trans, list_empty(&trans->qgroup_ref_list) ? "" : " not",
		(u32)(trans->delayed_ref_elem.seq >> 32),
		(u32)trans->delayed_ref_elem.seq);
	BUG();
}
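
/*
 * The rescan machinery below walks the extent tree one leaf per call of
 * qgroup_rescan_leaf(): the next leaf is looked up under
 * qgroup_rescan_lock, copied into a scratch buffer so the lock can be
 * dropped, and every EXTENT_ITEM / METADATA_ITEM in it is accounted,
 * advancing fs_info->qgroup_rescan_progress along the way.
 */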
/*
 * returns < 0 on error, 0 when more leaves are to be scanned.
 * returns 1 when done.
 */
static int
qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
		   struct btrfs_trans_handle *trans, struct ulist *qgroups,
		   struct ulist *tmp, struct extent_buffer *scratch_leaf)
{
	struct btrfs_key found;
	struct ulist *roots = NULL;
	struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
	u64 num_bytes;
	u64 seq;
	int new_roots;
	int slot;
	int ret;

	path->leave_spinning = 1;
	mutex_lock(&fs_info->qgroup_rescan_lock);
	ret = btrfs_search_slot_for_read(fs_info->extent_root,
					 &fs_info->qgroup_rescan_progress,
					 path, 1, 0);

	pr_debug("current progress key (%llu %u %llu), search_slot ret %d\n",
		 fs_info->qgroup_rescan_progress.objectid,
		 fs_info->qgroup_rescan_progress.type,
		 fs_info->qgroup_rescan_progress.offset, ret);

	if (ret) {
		/*
		 * The rescan is about to end, we will not be scanning any
		 * further blocks. We cannot unset the RESCAN flag here, because
		 * we want to commit the transaction if everything went well.
		 * To make the live accounting work in this phase, we set our
		 * scan progress pointer such that every real extent objectid
		 * will be smaller.
		 */
		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
		btrfs_release_path(path);
		mutex_unlock(&fs_info->qgroup_rescan_lock);
		return ret;
	}

	btrfs_item_key_to_cpu(path->nodes[0], &found,
			      btrfs_header_nritems(path->nodes[0]) - 1);
	fs_info->qgroup_rescan_progress.objectid = found.objectid + 1;

	btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
	memcpy(scratch_leaf, path->nodes[0], sizeof(*scratch_leaf));
	slot = path->slots[0];
	btrfs_release_path(path);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) {
		btrfs_item_key_to_cpu(scratch_leaf, &found, slot);
		if (found.type != BTRFS_EXTENT_ITEM_KEY &&
		    found.type != BTRFS_METADATA_ITEM_KEY)
			continue;
		if (found.type == BTRFS_METADATA_ITEM_KEY)
			num_bytes = fs_info->extent_root->nodesize;
		else
			num_bytes = found.offset;

		ulist_reinit(qgroups);
		ret = btrfs_find_all_roots(NULL, fs_info, found.objectid, 0,
					   &roots);
		if (ret < 0)
			goto out;
		spin_lock(&fs_info->qgroup_lock);
		seq = fs_info->qgroup_seq;
		fs_info->qgroup_seq += roots->nnodes + 1; /* max refcnt */

		new_roots = 0;
		ret = qgroup_calc_old_refcnt(fs_info, 0, tmp, roots, qgroups,
					     seq, &new_roots, 1);
		if (ret < 0) {
			spin_unlock(&fs_info->qgroup_lock);
			ulist_free(roots);
			goto out;
		}

		ret = qgroup_adjust_counters(fs_info, 0, num_bytes, qgroups,
					     seq, 0, new_roots, 1);
		if (ret < 0) {
			spin_unlock(&fs_info->qgroup_lock);
			ulist_free(roots);
			goto out;
		}
		spin_unlock(&fs_info->qgroup_lock);
		ulist_free(roots);
	}
out:
	btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);

	return ret;
}
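
/*
 * Worker body for the rescan: one transaction is started per leaf so the
 * scan makes progress in bounded steps. A return value > 0 from
 * qgroup_rescan_leaf() means the scan is done and the final transaction
 * is committed; afterwards the RESCAN status flag is cleared and the
 * on-disk qgroup status item is updated.
 */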
static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
{
	struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
						     qgroup_rescan_work);
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans = NULL;
	struct ulist *tmp = NULL, *qgroups = NULL;
	struct extent_buffer *scratch_leaf = NULL;
	int err = -ENOMEM;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		goto out;
	qgroups = ulist_alloc(GFP_NOFS);
	if (!qgroups)
		goto out;
	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		goto out;
	scratch_leaf = kmalloc(sizeof(*scratch_leaf), GFP_NOFS);
	if (!scratch_leaf)
		goto out;

	err = 0;
	while (!err) {
		trans = btrfs_start_transaction(fs_info->fs_root, 0);
		if (IS_ERR(trans)) {
			err = PTR_ERR(trans);
			break;
		}
		if (!fs_info->quota_enabled) {
			err = -EINTR;
		} else {
			err = qgroup_rescan_leaf(fs_info, path, trans,
						 qgroups, tmp, scratch_leaf);
		}
		if (err > 0)
			btrfs_commit_transaction(trans, fs_info->fs_root);
		else
			btrfs_end_transaction(trans, fs_info->fs_root);
	}

out:
	kfree(scratch_leaf);
	ulist_free(qgroups);
	ulist_free(tmp);
	btrfs_free_path(path);

	mutex_lock(&fs_info->qgroup_rescan_lock);
	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;

	if (err > 0 &&
	    fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	} else if (err < 0) {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	}
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	/*
	 * only update status, since the previous part has already updated the
	 * qgroup info.
	 */
	trans = btrfs_start_transaction(fs_info->quota_root, 1);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		btrfs_err(fs_info,
			  "fail to start transaction for status update: %d\n",
			  err);
		goto done;
	}
	ret = update_qgroup_status_item(trans, fs_info, fs_info->quota_root);
	if (ret < 0) {
		err = ret;
		btrfs_err(fs_info, "fail to update qgroup status: %d\n", err);
	}
	btrfs_end_transaction(trans, fs_info->quota_root);

	if (err >= 0) {
		btrfs_info(fs_info, "qgroup scan completed%s",
			err > 0 ? " (inconsistency flag cleared)" : "");
	} else {
		btrfs_err(fs_info, "qgroup scan failed with %d", err);
	}

done:
	complete_all(&fs_info->qgroup_rescan_completion);
}
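
/*
 * A rescan is started from three places: the rescan ioctl, a resume on
 * mount, and quota enable. All of them go through qgroup_rescan_init()
 * below to set the progress key and (re)initialize the worker; where a
 * clean start is wanted, qgroup_rescan_zero_tracking() resets the
 * counters before the worker is queued.
 */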
/*
 * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all
 * memory required for the rescan context.
 */
static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
		   int init_flags)
{
	int ret = 0;

	if (!init_flags &&
	    (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) ||
	     !(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))) {
		ret = -EINVAL;
		goto err;
	}

	mutex_lock(&fs_info->qgroup_rescan_lock);
	spin_lock(&fs_info->qgroup_lock);

	if (init_flags) {
		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
			ret = -EINPROGRESS;
		else if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
			ret = -EINVAL;

		if (ret) {
			spin_unlock(&fs_info->qgroup_lock);
			mutex_unlock(&fs_info->qgroup_rescan_lock);
			goto err;
		}
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN;
	}

	memset(&fs_info->qgroup_rescan_progress, 0,
		sizeof(fs_info->qgroup_rescan_progress));
	fs_info->qgroup_rescan_progress.objectid = progress_objectid;

	spin_unlock(&fs_info->qgroup_lock);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	init_completion(&fs_info->qgroup_rescan_completion);

	memset(&fs_info->qgroup_rescan_work, 0,
	       sizeof(fs_info->qgroup_rescan_work));
	btrfs_init_work(&fs_info->qgroup_rescan_work,
			btrfs_qgroup_rescan_helper,
			btrfs_qgroup_rescan_worker, NULL, NULL);

	if (ret) {
err:
		btrfs_info(fs_info, "qgroup_rescan_init failed with %d", ret);
		return ret;
	}

	return 0;
}
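
/*
 * Drop all counters tracked so far so the rescan can rebuild them from
 * scratch. Only rfer/excl and their compressed variants are cleared;
 * limits and reservations are left untouched.
 */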
static void
qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info)
{
	struct rb_node *n;
	struct btrfs_qgroup *qgroup;

	spin_lock(&fs_info->qgroup_lock);
	/* clear all current qgroup tracking information */
	for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		qgroup->rfer = 0;
		qgroup->rfer_cmpr = 0;
		qgroup->excl = 0;
		qgroup->excl_cmpr = 0;
	}
	spin_unlock(&fs_info->qgroup_lock);
}
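
/*
 * Entry point for a user-requested rescan. A minimal sketch of how an
 * ioctl handler might end up here (the surrounding handler is an
 * assumption, not code from this file):
 *
 *	ret = btrfs_qgroup_rescan(fs_info);
 *	if (ret == -EINPROGRESS)
 *		...	// a rescan is already running
 */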
int
btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
{
	int ret = 0;
	struct btrfs_trans_handle *trans;

	ret = qgroup_rescan_init(fs_info, 0, 1);
	if (ret)
		return ret;

	/*
	 * We have set the rescan_progress to 0, which means no more
	 * delayed refs will be accounted by btrfs_qgroup_account_ref.
	 * However, btrfs_qgroup_account_ref may be right after its call
	 * to btrfs_find_all_roots, in which case it would still do the
	 * accounting.
	 * To solve this, we're committing the transaction, which will
	 * ensure we run all delayed refs and only after that, we are
	 * going to clear all tracking information for a clean start.
	 */

	trans = btrfs_join_transaction(fs_info->fs_root);
	if (IS_ERR(trans)) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
		return PTR_ERR(trans);
	}
	ret = btrfs_commit_transaction(trans, fs_info->fs_root);
	if (ret) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
		return ret;
	}

	qgroup_rescan_zero_tracking(fs_info);

	btrfs_queue_work(fs_info->qgroup_rescan_workers,
			 &fs_info->qgroup_rescan_work);

	return 0;
}
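
/*
 * Block until a running rescan signals qgroup_rescan_completion. The wait
 * is interruptible, so callers (e.g. a rescan-wait ioctl) may see
 * -ERESTARTSYS if a signal arrives before the worker finishes.
 */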
int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info)
{
	int running;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_rescan_lock);
	spin_lock(&fs_info->qgroup_lock);
	running = fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN;
	spin_unlock(&fs_info->qgroup_lock);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	if (running)
		ret = wait_for_completion_interruptible(
			&fs_info->qgroup_rescan_completion);

	return ret;
}
/*
 * this is only called from open_ctree where we're still single threaded, thus
 * locking is omitted here.
 */
void
btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
{
	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
		btrfs_queue_work(fs_info->qgroup_rescan_workers,
				 &fs_info->qgroup_rescan_work);
}
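
/*
 * Resume-path sketch (assumed from the comment above, not code in this
 * file): during mount, once it is safe to start worker threads,
 * open_ctree() calls btrfs_qgroup_rescan_resume(fs_info); if the on-disk
 * status had the RESCAN flag set at the last unmount, this simply
 * re-queues qgroup_rescan_work and the scan continues from the stored
 * progress key.
 */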