Introduce struct clustered_alloc_info to manage parameters related to
clustered allocation. By separating clustered_alloc_info from
find_free_extent_ctl, we can introduce other allocation policies. One can
access the per-allocation-policy private information via the "alloc_info"
member of struct find_free_extent_ctl.
Signed-off-by: Naohiro Aota <naohiro.aota@xxxxxxx>
---
fs/btrfs/extent-tree.c | 99 +++++++++++++++++++++++++-----------------
1 file changed, 59 insertions(+), 40 deletions(-)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index b1f52eee24fe..8124a6461043 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3456,9 +3456,6 @@ struct find_free_extent_ctl {
/* Where to start the search inside the bg */
u64 search_start;
- /* For clustered allocation */
- u64 empty_cluster;
-
bool have_caching_bg;
bool orig_have_caching_bg;
@@ -3470,18 +3467,6 @@ struct find_free_extent_ctl {
*/
int loop;
- /*
- * Whether we're refilling a cluster, if true we need to re-search
- * current block group but don't try to refill the cluster again.
- */
- bool retry_clustered;
-
- /*
- * Whether we're updating free space cache, if true we need to re-search
- * current block group but don't try updating free space cache again.
- */
- bool retry_unclustered;
-
/* If current block group is cached */
int cached;
@@ -3499,8 +3484,28 @@ struct find_free_extent_ctl {
/* Allocation policy */
enum btrfs_extent_allocation_policy policy;
+ void *alloc_info;
};
+struct clustered_alloc_info {
+ /* For clustered allocation */
+ u64 empty_cluster;
+
+ /*
+ * Whether we're refilling a cluster, if true we need to re-search
+ * current block group but don't try to refill the cluster again.
+ */
+ bool retry_clustered;
+
+ /*
+ * Whether we're updating free space cache, if true we need to re-search
+ * current block group but don't try updating free space cache again.
+ */
+ bool retry_unclustered;
+
+ struct btrfs_free_cluster *last_ptr;
+ bool use_cluster;
+};
/*
* Helper function for find_free_extent().
@@ -3516,6 +3521,7 @@ static int find_free_extent_clustered(struct btrfs_block_group *bg,
struct btrfs_block_group **cluster_bg_ret)
{
struct btrfs_block_group *cluster_bg;
+ struct clustered_alloc_info *clustered = ffe_ctl->alloc_info;
u64 aligned_cluster;
u64 offset;
int ret;
@@ -3572,7 +3578,7 @@ static int find_free_extent_clustered(struct btrfs_block_group *bg,
}
aligned_cluster = max_t(u64,
- ffe_ctl->empty_cluster + ffe_ctl->empty_size,
+ clustered->empty_cluster + ffe_ctl->empty_size,
bg->full_stripe_len);
ret = btrfs_find_space_cluster(bg, last_ptr, ffe_ctl->search_start,
ffe_ctl->num_bytes, aligned_cluster);
@@ -3591,12 +3597,12 @@ static int find_free_extent_clustered(struct btrfs_block_group *bg,
return 0;
}
} else if (!ffe_ctl->cached && ffe_ctl->loop > LOOP_CACHING_NOWAIT &&
- !ffe_ctl->retry_clustered) {
+ !clustered->retry_clustered) {
spin_unlock(&last_ptr->refill_lock);
- ffe_ctl->retry_clustered = true;
+ clustered->retry_clustered = true;
btrfs_wait_block_group_cache_progress(bg, ffe_ctl->num_bytes +
- ffe_ctl->empty_cluster + ffe_ctl->empty_size);
+ clustered->empty_cluster + ffe_ctl->empty_size);
return -EAGAIN;
}
/*
@@ -3618,6 +3624,7 @@ static int find_free_extent_unclustered(struct btrfs_block_group *bg,
struct btrfs_free_cluster *last_ptr,
struct find_free_extent_ctl *ffe_ctl)
{
+ struct clustered_alloc_info *clustered = ffe_ctl->alloc_info;
u64 offset;
/*
@@ -3636,7 +3643,7 @@ static int find_free_extent_unclustered(struct btrfs_block_group *bg,
free_space_ctl = bg->free_space_ctl;
spin_lock(&free_space_ctl->tree_lock);
if (free_space_ctl->free_space <
- ffe_ctl->num_bytes + ffe_ctl->empty_cluster +
+ ffe_ctl->num_bytes + clustered->empty_cluster +
ffe_ctl->empty_size) {
ffe_ctl->total_free_space = max_t(u64,
ffe_ctl->total_free_space,
@@ -3660,11 +3667,11 @@ static int find_free_extent_unclustered(struct btrfs_block_group *bg,
* If @retry_unclustered is true then we've already waited on this
* block group once and should move on to the next block group.
*/
- if (!offset && !ffe_ctl->retry_unclustered && !ffe_ctl->cached &&
+ if (!offset && !clustered->retry_unclustered && !ffe_ctl->cached &&
ffe_ctl->loop > LOOP_CACHING_NOWAIT) {
btrfs_wait_block_group_cache_progress(bg, ffe_ctl->num_bytes +
ffe_ctl->empty_size);
- ffe_ctl->retry_unclustered = true;
+ clustered->retry_unclustered = true;
return -EAGAIN;
} else if (!offset) {
return 1;
@@ -3685,6 +3692,7 @@ static int find_free_extent_update_loop(struct btrfs_fs_info *fs_info,
bool full_search, bool use_cluster)
{
struct btrfs_root *root = fs_info->extent_root;
+ struct clustered_alloc_info *clustered = ffe_ctl->alloc_info;
int ret;
if ((ffe_ctl->loop == LOOP_CACHING_NOWAIT) &&
@@ -3774,10 +3782,10 @@ static int find_free_extent_update_loop(struct btrfs_fs_info *fs_info,
* no empty_cluster.
*/
if (ffe_ctl->empty_size == 0 &&
- ffe_ctl->empty_cluster == 0)
+ clustered->empty_cluster == 0)
return -ENOSPC;
ffe_ctl->empty_size = 0;
- ffe_ctl->empty_cluster = 0;
+ clustered->empty_cluster = 0;
}
return 1;
}
@@ -3816,11 +3824,10 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
{
int ret = 0;
int cache_block_group_error = 0;
- struct btrfs_free_cluster *last_ptr = NULL;
struct btrfs_block_group *block_group = NULL;
struct find_free_extent_ctl ffe_ctl = {0};
struct btrfs_space_info *space_info;
- bool use_cluster = true;
+ struct clustered_alloc_info *clustered = NULL;
bool full_search = false;
WARN_ON(num_bytes < fs_info->sectorsize);
@@ -3829,8 +3836,6 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
ffe_ctl.empty_size = empty_size;
ffe_ctl.flags = flags;
ffe_ctl.search_start = 0;
- ffe_ctl.retry_clustered = false;
- ffe_ctl.retry_unclustered = false;
ffe_ctl.delalloc = delalloc;
ffe_ctl.index = btrfs_bg_flags_to_raid_index(flags);
ffe_ctl.have_caching_bg = false;
@@ -3851,6 +3856,15 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
return -ENOSPC;
}
+ clustered = kzalloc(sizeof(*clustered), GFP_NOFS);
+ if (!clustered)
+ return -ENOMEM;