Re: [syzbot] [exfat?] possible deadlock in fat_count_free_clusters

Jens Axboe <axboe@xxxxxxxxx> writes:

> On 11/19/24 5:10 AM, Ming Lei wrote:
>> On Tue, Nov 12, 2024 at 12:44 AM OGAWA Hirofumi
>> <hirofumi@xxxxxxxxxxxxxxxxxx> wrote:
>>>
>>> Hi,
>>>
>>> syzbot <syzbot+a5d8c609c02f508672cc@xxxxxxxxxxxxxxxxxxxxxxxxx> writes:
>>>
>>>> syzbot found the following issue on:
>>>>
>>>> HEAD commit:    929beafbe7ac Add linux-next specific files for 20241108
>>>> git tree:       linux-next
>>>> console output: https://syzkaller.appspot.com/x/log.txt?x=1621bd87980000
>>>> kernel config:  https://syzkaller.appspot.com/x/.config?x=75175323f2078363
>>>> dashboard link: https://syzkaller.appspot.com/bug?extid=a5d8c609c02f508672cc
>>>> compiler:       Debian clang version 15.0.6, GNU ld (GNU Binutils for Debian) 2.40

[...]

>> 
>> Looks fine,
>> 
>> Reviewed-by: Ming Lei <ming.lei@xxxxxxxxxx>
>
> The patch doesn't apply to the for-6.13/block tree, Ogawa can you send
> an updated one please?

Updated the patch for linux-block:for-6.13/block. Please apply.

Thanks.
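
In short, the vfs_statfs()-based discard probing is moved out from under
q->limits_lock. As a rough sketch (condensed from the diff below, with the
unrelated limit setup omitted), loop_reconfigure_limits() now does:

	u32 granularity = 0, max_discard_sectors = 0;
	struct queue_limits lim;

	/* May call vfs_statfs() and take FS locks; runs without q->limits_lock. */
	loop_get_discard_config(lo, &granularity, &max_discard_sectors);

	lim = queue_limits_start_update(lo->lo_queue);	/* takes q->limits_lock */
	lim.max_hw_discard_sectors = max_discard_sectors;
	lim.max_write_zeroes_sectors = max_discard_sectors;
	if (max_discard_sectors)
		lim.discard_granularity = granularity;
	else
		lim.discard_granularity = 0;
	return queue_limits_commit_update(lo->lo_queue, &lim);	/* drops the lock */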


From: OGAWA Hirofumi <hirofumi@xxxxxxxxxxxxxxxxxx>
Subject: [PATCH] loop: Fix ABBA locking race
Date: Tue, 19 Nov 2024 23:42:23 +0900

Currently, loop calls vfs_statfs() while holding q->limits_lock. If the
filesystem takes locks in its statfs callback, this can lead to an ABBA
deadlock (FAT actually has this issue).

Fix this by calling vfs_statfs() outside q->limits_lock instead; there is
no reason to hold q->limits_lock while querying the discard configuration.

Chain exists of:
  &sbi->fat_lock --> &q->q_usage_counter(io)#17 --> &q->limits_lock

 Possible unsafe locking scenario:

       CPU0                    CPU1
       ----                    ----
  lock(&q->limits_lock);
                               lock(&q->q_usage_counter(io)#17);
                               lock(&q->limits_lock);
  lock(&sbi->fat_lock);

 *** DEADLOCK ***

Reported-by: syzbot+a5d8c609c02f508672cc@xxxxxxxxxxxxxxxxxxxxxxxxx
Closes: https://syzkaller.appspot.com/bug?extid=a5d8c609c02f508672cc
Reviewed-by: Ming Lei <ming.lei@xxxxxxxxxx>
Signed-off-by: OGAWA Hirofumi <hirofumi@xxxxxxxxxxxxxxxxxx>
---
 drivers/block/loop.c |   30 +++++++++++++++---------------
 1 file changed, 15 insertions(+), 15 deletions(-)

diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index fe9bb4f..8f6761c 100644
--- a/drivers/block/loop.c	2024-11-19 23:37:54.760751014 +0900
+++ b/drivers/block/loop.c	2024-11-19 23:38:55.645461107 +0900
@@ -770,12 +770,11 @@ static void loop_sysfs_exit(struct loop_
 				   &loop_attribute_group);
 }
 
-static void loop_config_discard(struct loop_device *lo,
-		struct queue_limits *lim)
+static void loop_get_discard_config(struct loop_device *lo,
+				    u32 *granularity, u32 *max_discard_sectors)
 {
 	struct file *file = lo->lo_backing_file;
 	struct inode *inode = file->f_mapping->host;
-	u32 granularity = 0, max_discard_sectors = 0;
 	struct kstatfs sbuf;
 
 	/*
@@ -788,24 +787,17 @@ static void loop_config_discard(struct l
 	if (S_ISBLK(inode->i_mode)) {
 		struct block_device *bdev = I_BDEV(inode);
 
-		max_discard_sectors = bdev_write_zeroes_sectors(bdev);
-		granularity = bdev_discard_granularity(bdev);
+		*max_discard_sectors = bdev_write_zeroes_sectors(bdev);
+		*granularity = bdev_discard_granularity(bdev);
 
 	/*
 	 * We use punch hole to reclaim the free space used by the
 	 * image a.k.a. discard.
 	 */
 	} else if (file->f_op->fallocate && !vfs_statfs(&file->f_path, &sbuf)) {
-		max_discard_sectors = UINT_MAX >> 9;
-		granularity = sbuf.f_bsize;
+		*max_discard_sectors = UINT_MAX >> 9;
+		*granularity = sbuf.f_bsize;
 	}
-
-	lim->max_hw_discard_sectors = max_discard_sectors;
-	lim->max_write_zeroes_sectors = max_discard_sectors;
-	if (max_discard_sectors)
-		lim->discard_granularity = granularity;
-	else
-		lim->discard_granularity = 0;
 }
 
 struct loop_worker {
@@ -991,6 +983,7 @@ static int loop_reconfigure_limits(struc
 	struct inode *inode = file->f_mapping->host;
 	struct block_device *backing_bdev = NULL;
 	struct queue_limits lim;
+	u32 granularity = 0, max_discard_sectors = 0;
 
 	if (S_ISBLK(inode->i_mode))
 		backing_bdev = I_BDEV(inode);
@@ -1000,6 +993,8 @@ static int loop_reconfigure_limits(struc
 	if (!bsize)
 		bsize = loop_default_blocksize(lo, backing_bdev);
 
+	loop_get_discard_config(lo, &granularity, &max_discard_sectors);
+
 	lim = queue_limits_start_update(lo->lo_queue);
 	lim.logical_block_size = bsize;
 	lim.physical_block_size = bsize;
@@ -1009,7 +1004,12 @@ static int loop_reconfigure_limits(struc
 		lim.features |= BLK_FEAT_WRITE_CACHE;
 	if (backing_bdev && !bdev_nonrot(backing_bdev))
 		lim.features |= BLK_FEAT_ROTATIONAL;
-	loop_config_discard(lo, &lim);
+	lim.max_hw_discard_sectors = max_discard_sectors;
+	lim.max_write_zeroes_sectors = max_discard_sectors;
+	if (max_discard_sectors)
+		lim.discard_granularity = granularity;
+	else
+		lim.discard_granularity = 0;
 	return queue_limits_commit_update(lo->lo_queue, &lim);
 }
 
_
-- 
OGAWA Hirofumi <hirofumi@xxxxxxxxxxxxxxxxxx>




