On Mon, Sep 23, 2024 at 3:48 PM Joanne Koong <joannelkoong@xxxxxxxxx> wrote:
>
> On Mon, Sep 23, 2024 at 3:34 AM Lai, Yi <yi1.lai@xxxxxxxxxxxxxxx> wrote:
> >
> > Hi Miklos Szeredi,
> >
> > Greetings!
> >
> > I used Syzkaller and found that there is a WARNING in fuse_request_end in the Linux-next tree - next-20240918.
> >
> > After bisection, the first bad commit is:
> > "
> > 5de8acb41c86 fuse: cleanup request queuing towards virtiofs
> > "
> >
> > All detailed info can be found at:
> > https://github.com/laifryiee/syzkaller_logs/tree/main/240922_114402_fuse_request_end
> > Syzkaller repro code:
> > https://github.com/laifryiee/syzkaller_logs/blob/main/240922_114402_fuse_request_end/repro.c
> > Syzkaller repro syscall steps:
> > https://github.com/laifryiee/syzkaller_logs/blob/main/240922_114402_fuse_request_end/repro.prog
> > Syzkaller report:
> > https://github.com/laifryiee/syzkaller_logs/blob/main/240922_114402_fuse_request_end/repro.report
> > Kconfig (make olddefconfig):
> > https://github.com/laifryiee/syzkaller_logs/blob/main/240922_114402_fuse_request_end/kconfig_origin
> > Bisect info:
> > https://github.com/laifryiee/syzkaller_logs/blob/main/240922_114402_fuse_request_end/bisect_info.log
> > bzImage:
> > https://github.com/laifryiee/syzkaller_logs/raw/main/240922_114402_fuse_request_end/bzImage_55bcd2e0d04c1171d382badef1def1fd04ef66c5
> > Issue dmesg:
> > https://github.com/laifryiee/syzkaller_logs/blob/main/240922_114402_fuse_request_end/55bcd2e0d04c1171d382badef1def1fd04ef66c5_dmesg.log
> >
> > "
> > [   31.577123] ------------[ cut here ]------------
> > [   31.578842] WARNING: CPU: 1 PID: 1186 at fs/fuse/dev.c:373 fuse_request_end+0x7d2/0x910
> > [   31.581269] Modules linked in:
> > [   31.582553] CPU: 1 UID: 0 PID: 1186 Comm: repro Not tainted 6.11.0-next-20240918-55bcd2e0d04c #1
> > [   31.584332] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.16.0-0-gd239552ce722-prebuilt.qemu.org 04/01/2014
> > [   31.586281] RIP: 0010:fuse_request_end+0x7d2/0x910
> > [   31.587001] Code: ff 48 8b 7d d0 e8 ae 0f 72 ff e9 c2 fb ff ff e8 a4 0f 72 ff e9 e7 fb ff ff e8 3a 3b 0a ff 0f 0b e9 17 fa ff ff e8 2e 3b 0a ff <0f> 0b e9 c1 f9 ff ff 4c 89 ff e8 af 0f 72 ff e9 82 f8 ff ff e8 a5
> > [   31.589442] RSP: 0018:ffff88802141f640 EFLAGS: 00010293
> > [   31.590198] RAX: 0000000000000000 RBX: 0000000000000201 RCX: ffffffff825d5bb2
> > [   31.591137] RDX: ffff888010b2a500 RSI: ffffffff825d61f2 RDI: 0000000000000001
> > [   31.592072] RBP: ffff88802141f680 R08: 0000000000000000 R09: ffffed100356f28e
> > [   31.593010] R10: 0000000000000001 R11: 0000000000000001 R12: ffff88801ab79440
> > [   31.594062] R13: ffff88801ab79470 R14: ffff88801dcaa000 R15: ffff88800d71fa00
> > [   31.594820] FS:  00007f812eca2640(0000) GS:ffff88806c500000(0000) knlGS:0000000000000000
> > [   31.595670] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
> > [   31.596299] CR2: 000055b7baa16b20 CR3: 00000000109b0002 CR4: 0000000000770ef0
> > [   31.597054] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
> > [   31.597850] DR3: 0000000000000000 DR6: 00000000ffff07f0 DR7: 0000000000000400
> > [   31.598595] PKRU: 55555554
> > [   31.598902] Call Trace:
> > [   31.599180]  <TASK>
> > [   31.599439]  ? show_regs+0x6d/0x80
> > [   31.599805]  ? __warn+0xf3/0x380
> > [   31.600137]  ? report_bug+0x25e/0x4b0
> > [   31.600521]  ? fuse_request_end+0x7d2/0x910
> > [   31.600885]  ? report_bug+0x2cb/0x4b0
> > [   31.601204]  ? fuse_request_end+0x7d2/0x910
> > [   31.601564]  ? fuse_request_end+0x7d3/0x910
> > [   31.601956]  ? handle_bug+0xf1/0x190
> > [   31.602275]  ? exc_invalid_op+0x3c/0x80
> > [   31.602595]  ? asm_exc_invalid_op+0x1f/0x30
> > [   31.602959]  ? fuse_request_end+0x192/0x910
> > [   31.603301]  ? fuse_request_end+0x7d2/0x910
> > [   31.603643]  ? fuse_request_end+0x7d2/0x910
> > [   31.603988]  ? do_raw_spin_unlock+0x15c/0x210
> > [   31.604366]  fuse_dev_queue_req+0x23c/0x2b0
> > [   31.604714]  fuse_send_one+0x1d1/0x360
> > [   31.605031]  fuse_simple_request+0x348/0xd30
> > [   31.605385]  ? lockdep_hardirqs_on+0x89/0x110
> > [   31.605755]  fuse_send_open+0x234/0x2f0
> > [   31.606126]  ? __pfx_fuse_send_open+0x10/0x10
> > [   31.606487]  ? kasan_save_track+0x18/0x40
> > [   31.606834]  ? lockdep_init_map_type+0x2df/0x810
> > [   31.607227]  ? __kasan_check_write+0x18/0x20
> > [   31.607591]  fuse_file_open+0x2bc/0x770
> > [   31.607921]  fuse_do_open+0x5d/0xe0
> > [   31.608215]  ? __sanitizer_cov_trace_const_cmp4+0x1a/0x20
> > [   31.608681]  fuse_dir_open+0x138/0x220
> > [   31.609005]  do_dentry_open+0x6be/0x1390
> > [   31.609358]  ? __sanitizer_cov_trace_const_cmp4+0x1a/0x20
> > [   31.609861]  ? __pfx_fuse_dir_open+0x10/0x10
> > [   31.610240]  vfs_open+0x87/0x3f0
> > [   31.610523]  ? may_open+0x205/0x430
> > [   31.610834]  path_openat+0x23b7/0x32d0
> > [   31.611161]  ? __pfx_path_openat+0x10/0x10
> > [   31.611502]  ? lock_acquire.part.0+0x152/0x390
> > [   31.611874]  ? __this_cpu_preempt_check+0x21/0x30
> > [   31.612266]  ? lock_is_held_type+0xef/0x150
> > [   31.612611]  ? __this_cpu_preempt_check+0x21/0x30
> > [   31.613002]  do_filp_open+0x1cc/0x420
> > [   31.613316]  ? __pfx_do_filp_open+0x10/0x10
> > [   31.613669]  ? lock_release+0x441/0x870
> > [   31.614043]  ? __pfx_lock_release+0x10/0x10
> > [   31.614404]  ? do_raw_spin_unlock+0x15c/0x210
> > [   31.614784]  do_sys_openat2+0x185/0x1f0
> > [   31.615105]  ? __pfx_do_sys_openat2+0x10/0x10
> > [   31.615470]  ? __this_cpu_preempt_check+0x21/0x30
> > [   31.615854]  ? seqcount_lockdep_reader_access.constprop.0+0xb4/0xd0
> > [   31.616370]  ? lockdep_hardirqs_on+0x89/0x110
> > [   31.616736]  __x64_sys_openat+0x17a/0x240
> > [   31.617067]  ? __pfx___x64_sys_openat+0x10/0x10
> > [   31.617447]  ? __audit_syscall_entry+0x39c/0x500
> > [   31.617870]  x64_sys_call+0x1a52/0x20d0
> > [   31.618194]  do_syscall_64+0x6d/0x140
> > [   31.618504]  entry_SYSCALL_64_after_hwframe+0x76/0x7e
> > [   31.618917] RIP: 0033:0x7f812eb3e8c4
> > [   31.619225] Code: 24 20 eb 8f 66 90 44 89 54 24 0c e8 76 d3 f5 ff 44 8b 54 24 0c 44 89 e2 48 89 ee 41 89 c0 bf 9c ff ff ff b8 01 01 00 00 0f 05 <48> 3d 00 f0 ff ff 77 34 44 89 c7 89 44 24 0c e8 c8 d3 f5 ff 8b 44
> > [   31.620656] RSP: 002b:00007f812eca1b90 EFLAGS: 00000293 ORIG_RAX: 0000000000000101
> > [   31.621255] RAX: ffffffffffffffda RBX: 00007f812eca2640 RCX: 00007f812eb3e8c4
> > [   31.621864] RDX: 0000000000010000 RSI: 0000000020002080 RDI: 00000000ffffff9c
> > [   31.622428] RBP: 0000000020002080 R08: 0000000000000000 R09: 0000000000000000
> > [   31.622987] R10: 0000000000000000 R11: 0000000000000293 R12: 0000000000010000
> > [   31.623549] R13: 0000000000000006 R14: 00007f812ea9f560 R15: 0000000000000000
> > [   31.624123]  </TASK>
> > [   31.624316] irq event stamp: 1655
> > [   31.624595] hardirqs last enabled at (1663): [<ffffffff8145cb85>] __up_console_sem+0x95/0xb0
> > [   31.625310] hardirqs last disabled at (1670): [<ffffffff8145cb6a>] __up_console_sem+0x7a/0xb0
> > [   31.626039] softirqs last enabled at (1466): [<ffffffff8128a889>] __irq_exit_rcu+0xa9/0x120
> > [   31.626726] softirqs last disabled at (1449): [<ffffffff8128a889>] __irq_exit_rcu+0xa9/0x120
> > [   31.627405] ---[ end trace 0000000000000000 ]---
> > "
> >
> > I hope you find it useful.
> >
> > Regards,
> > Yi Lai
> >
> > ---
> >
> > If you don't need the following environment to reproduce the problem, or if you
> > already have a reproduction environment set up, please ignore the following information.
> >
> > How to reproduce:
> > git clone https://gitlab.com/xupengfe/repro_vm_env.git
> > cd repro_vm_env
> > tar -xvf repro_vm_env.tar.gz
> > cd repro_vm_env; ./start3.sh  // it needs qemu-system-x86_64; I used v7.1.0
> > // start3.sh will load the bzImage_2241ab53cbb5cdb08a6b2d4688feb13971058f65 v6.2-rc5 kernel
> > // You can change bzImage_xxx to whichever kernel you want
> > // You may need to remove the line "-drive if=pflash,format=raw,readonly=on,file=./OVMF_CODE.fd \" for a different qemu version
> > You can use the command below to log in; there is no password for root:
> > ssh -p 10023 root@localhost
> >
> > After logging in to the vm (virtual machine) successfully, you can transfer the
> > reproducer binary to the vm as follows and reproduce the problem there:
> > gcc -pthread -o repro repro.c
> > scp -P 10023 repro root@localhost:/root/
> >
> > To get the bzImage for the target kernel:
> > Please copy the target kconfig to kernel_src/.config, then run:
> > make olddefconfig
> > make -jx bzImage  // x should be equal to or less than the number of CPUs your PC has
> >
> > Point the above start3.sh at this bzImage file to load the target kernel in the vm.
> >
> > Tips:
> > If you already have qemu-system-x86_64, please ignore the info below.
> > If you want to install qemu v7.1.0:
> > git clone https://github.com/qemu/qemu.git
> > cd qemu
> > git checkout -f v7.1.0
> > mkdir build
> > cd build
> > yum install -y ninja-build.x86_64
> > yum -y install libslirp-devel.x86_64
> > ../configure --target-list=x86_64-softmmu --enable-kvm --enable-vnc --enable-gtk --enable-sdl --enable-usb-redir --enable-slirp
> > make
> > make install
> >
> > On Wed, May 29, 2024 at 05:52:07PM +0200, Miklos Szeredi wrote:
> > > Virtiofs has its own queuing mechanism, but requests are still first queued
> > > on fiq->pending, only to be immediately dequeued and queued onto the virtio
> > > queue.
> > >
> > > The queuing on fiq->pending is unnecessary and might even have some
> > > performance impact due to being a contention point.
> > >
> > > Forget requests are handled similarly.
> > >
> > > Move the queuing of requests and forgets into the fiq->ops->*.
> > > fuse_iqueue_ops are renamed to reflect the new semantics.
> > >
> > > Signed-off-by: Miklos Szeredi <mszeredi@xxxxxxxxxx>
> > > ---
> > >  fs/fuse/dev.c       | 159 ++++++++++++++++++++++++--------------------
> > >  fs/fuse/fuse_i.h    |  19 ++----
> > >  fs/fuse/virtio_fs.c |  41 ++++--------
> > >  3 files changed, 106 insertions(+), 113 deletions(-)
> > >
> > > diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
> > > index 9eb191b5c4de..a4f510f1b1a4 100644
> > > --- a/fs/fuse/dev.c
> > > +++ b/fs/fuse/dev.c
> > > @@ -192,10 +192,22 @@ unsigned int fuse_len_args(unsigned int numargs, struct fuse_arg *args)
> > >  }
> > >  EXPORT_SYMBOL_GPL(fuse_len_args);
> > >
> > > -u64 fuse_get_unique(struct fuse_iqueue *fiq)
> > > +static u64 fuse_get_unique_locked(struct fuse_iqueue *fiq)
> > >  {
> > >          fiq->reqctr += FUSE_REQ_ID_STEP;
> > >          return fiq->reqctr;
> > > +
> > > +}
> > > +
> > > +u64 fuse_get_unique(struct fuse_iqueue *fiq)
> > > +{
> > > +        u64 ret;
> > > +
> > > +        spin_lock(&fiq->lock);
> > > +        ret = fuse_get_unique_locked(fiq);
> > > +        spin_unlock(&fiq->lock);
> > > +
> > > +        return ret;
> > >  }
> > >  EXPORT_SYMBOL_GPL(fuse_get_unique);
> > >
> > > @@ -215,22 +227,67 @@ __releases(fiq->lock)
> > >          spin_unlock(&fiq->lock);
> > >  }
> > >
> > > +static void fuse_dev_queue_forget(struct fuse_iqueue *fiq, struct fuse_forget_link *forget)
> > > +{
> > > +        spin_lock(&fiq->lock);
> > > +        if (fiq->connected) {
> > > +                fiq->forget_list_tail->next = forget;
> > > +                fiq->forget_list_tail = forget;
> > > +                fuse_dev_wake_and_unlock(fiq);
> > > +        } else {
> > > +                kfree(forget);
> > > +                spin_unlock(&fiq->lock);
> > > +        }
> > > +}
> > > +
> > > +static void fuse_dev_queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
> > > +{
> > > +        spin_lock(&fiq->lock);
> > > +        if (list_empty(&req->intr_entry)) {
> > > +                list_add_tail(&req->intr_entry, &fiq->interrupts);
> > > +                /*
> > > +                 * Pairs with smp_mb() implied by test_and_set_bit()
> > > +                 * from fuse_request_end().
> > > +                 */
> > > +                smp_mb();
> > > +                if (test_bit(FR_FINISHED, &req->flags)) {
> > > +                        list_del_init(&req->intr_entry);
> > > +                        spin_unlock(&fiq->lock);
> > > +                        return;
> > > +                }
> > > +                fuse_dev_wake_and_unlock(fiq);
> > > +        } else {
> > > +                spin_unlock(&fiq->lock);
> > > +        }
> > > +}
> > > +
> > > +static void fuse_dev_queue_req(struct fuse_iqueue *fiq, struct fuse_req *req)
> > > +{
> > > +        spin_lock(&fiq->lock);
> > > +        if (fiq->connected) {
> > > +                if (req->in.h.opcode != FUSE_NOTIFY_REPLY)
> > > +                        req->in.h.unique = fuse_get_unique_locked(fiq);
> > > +                list_add_tail(&req->list, &fiq->pending);
> > > +                fuse_dev_wake_and_unlock(fiq);
> > > +        } else {
> > > +                spin_unlock(&fiq->lock);
> > > +                req->out.h.error = -ENOTCONN;
> > > +                fuse_request_end(req);
>
> In the case where the connection has been aborted, this request will
> still have the FR_PENDING flag set on it when it calls
> fuse_request_end(). I think we can just call fuse_put_request() here
> instead.

Actually, after looking more at this, I missed that this patch changes
the logic to now call request_wait_answer() (for non-background
requests) even if the connection was aborted. So maybe just doing
clear_bit(FR_PENDING, &req->flags) before we call fuse_request_end()
is the best fix.

>
> > > +        }
> > > +
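
FWIW, a minimal sketch of that clear_bit() suggestion, as an untested
diff on top of this patch, just to illustrate the idea (presumably the
warning syzkaller hit is the WARN_ON(test_bit(FR_PENDING, &req->flags))
check in fuse_request_end()):

--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ ... @@ static void fuse_dev_queue_req(struct fuse_iqueue *fiq, struct fuse_req *req)
         } else {
                 spin_unlock(&fiq->lock);
                 req->out.h.error = -ENOTCONN;
+                /*
+                 * The request was never picked up by a fuse_dev reader,
+                 * so FR_PENDING (set when the request was allocated) is
+                 * still set here.  Clear it before completing the request
+                 * so that fuse_request_end() does not warn about a
+                 * still-pending request.
+                 */
+                clear_bit(FR_PENDING, &req->flags);
                 fuse_request_end(req);
         }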