Hi Chris,

I suspect the async_delalloc_pages limit check in fs/btrfs/inode.c

	atomic_read(&root->fs_info->async_delalloc_pages) < 5 * 1042 * 1024

never evaluates to false: async_delalloc_pages counts pages, not bytes,
so a threshold of ~5M pages amounts to roughly 20GB with 4KB pages,
which is far too large to ever be reached. (Note also the 1042-vs-1024
typo, which appears again in the "limit = 10 * 1024 * 1042" of
cow_file_range_async.)

I tried to fix it with the following patch. I increased the limit a
bit, but I still worry that the "corrected" limit is too small and may
introduce regressions. What do you think?

Thanks,
Fengguang
---
 fs/btrfs/inode.c |   10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

--- linux.orig/fs/btrfs/inode.c	2009-11-07 19:28:07.000000000 +0800
+++ linux/fs/btrfs/inode.c	2009-11-07 19:28:08.000000000 +0800
@@ -50,6 +50,8 @@
 #include "compression.h"
 #include "locking.h"
 
+#define BTRFS_ASYNC_DELALLOC_PAGES_LIMIT	(64 << (20-PAGE_CACHE_SHIFT))
+
 struct btrfs_iget_args {
 	u64 ino;
 	struct btrfs_root *root;
@@ -856,7 +858,7 @@ static noinline void async_cow_submit(st
 	atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages);
 
 	if (atomic_read(&root->fs_info->async_delalloc_pages) <
-	    5 * 1042 * 1024 &&
+	    BTRFS_ASYNC_DELALLOC_PAGES_LIMIT / 2 &&
 	    waitqueue_active(&root->fs_info->async_submit_wait))
 		wake_up(&root->fs_info->async_submit_wait);
 
@@ -879,7 +881,6 @@ static int cow_file_range_async(struct i
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	unsigned long nr_pages;
 	u64 cur_end;
-	int limit = 10 * 1024 * 1042;
 
 	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
 			 1, 0, NULL, GFP_NOFS);
@@ -910,10 +911,11 @@ static int cow_file_range_async(struct i
 		btrfs_queue_worker(&root->fs_info->delalloc_workers,
 				   &async_cow->work);
 
-		if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
+		if (atomic_read(&root->fs_info->async_delalloc_pages) >
+		    BTRFS_ASYNC_DELALLOC_PAGES_LIMIT) {
 			wait_event(root->fs_info->async_submit_wait,
 			   (atomic_read(&root->fs_info->async_delalloc_pages) <
-			    limit));
+			    BTRFS_ASYNC_DELALLOC_PAGES_LIMIT));
 		}
 
 		while (atomic_read(&root->fs_info->async_submit_draining) &&
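
P.S. As a sanity check of the numbers above, here is a quick userspace
sketch (mine, not part of the patch; it assumes the common 4KB page
size, i.e. PAGE_CACHE_SHIFT == 12) that prints the old and new
thresholds as page counts and megabytes:

	#include <stdio.h>

	#define PAGE_CACHE_SHIFT 12	/* assumed: 4KB pages */

	int main(void)
	{
		/* old threshold: compared against a page count in the
		 * kernel, even though 5 * 1042 * 1024 reads like bytes */
		long long old_pages = 5LL * 1042 * 1024;
		/* new threshold from the patch: 64MB expressed as pages */
		long long new_pages = 64LL << (20 - PAGE_CACHE_SHIFT);

		printf("old: %lld pages = %lld MB\n",
		       old_pages, (old_pages << PAGE_CACHE_SHIFT) >> 20);
		printf("new: %lld pages = %lld MB\n",
		       new_pages, (new_pages << PAGE_CACHE_SHIFT) >> 20);
		return 0;
	}

It prints

	old: 5335040 pages = 20840 MB
	new: 16384 pages = 64 MB

which is where the ~20GB figure comes from.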