From: Hou Tao <houtao1@xxxxxxxxxx>

When virtio_fs_enqueue_req() is invoked through the kworker, both the
allocation of the sg array and of the bounce buffer still use
GFP_ATOMIC. Since the size of the sg array may be greater than
PAGE_SIZE, use GFP_NOFS instead of GFP_ATOMIC to lower the possibility
of memory allocation failure and to avoid unnecessarily depleting the
atomic reserves. GFP_NOFS is not passed to virtio_fs_enqueue_req()
directly; instead, GFP_KERNEL is passed and the
memalloc_nofs_{save|restore} helpers are used to apply the NOFS
restriction.

Signed-off-by: Hou Tao <houtao1@xxxxxxxxxx>
---
 fs/fuse/virtio_fs.c | 22 ++++++++++++++--------
 1 file changed, 14 insertions(+), 8 deletions(-)

diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c
index 34b9370beba6d..9ee71051c89f2 100644
--- a/fs/fuse/virtio_fs.c
+++ b/fs/fuse/virtio_fs.c
@@ -108,7 +108,8 @@ struct virtio_fs_argbuf {
 };
 
 static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
-				 struct fuse_req *req, bool in_flight);
+				 struct fuse_req *req, bool in_flight,
+				 gfp_t gfp);
 
 static const struct constant_table dax_param_enums[] = {
 	{"always",	FUSE_DAX_ALWAYS },
@@ -394,6 +395,8 @@ static void virtio_fs_request_dispatch_work(struct work_struct *work)
 
 	/* Dispatch pending requests */
 	while (1) {
+		unsigned int flags;
+
 		spin_lock(&fsvq->lock);
 		req = list_first_entry_or_null(&fsvq->queued_reqs,
 					       struct fuse_req, list);
@@ -404,7 +407,9 @@ static void virtio_fs_request_dispatch_work(struct work_struct *work)
 		list_del_init(&req->list);
 		spin_unlock(&fsvq->lock);
 
-		ret = virtio_fs_enqueue_req(fsvq, req, true);
+		flags = memalloc_nofs_save();
+		ret = virtio_fs_enqueue_req(fsvq, req, true, GFP_KERNEL);
+		memalloc_nofs_restore(flags);
 		if (ret < 0) {
 			if (ret == -ENOMEM || ret == -ENOSPC) {
 				spin_lock(&fsvq->lock);
@@ -1332,7 +1337,8 @@ static bool use_scattered_argbuf(struct fuse_req *req)
 
 /* Add a request to a virtqueue and kick the device */
 static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
-				 struct fuse_req *req, bool in_flight)
+				 struct fuse_req *req, bool in_flight,
+				 gfp_t gfp)
 {
 	/* requests need at least 4 elements */
 	struct scatterlist *stack_sgs[6];
@@ -1364,8 +1370,8 @@ static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
 	total_sgs = sg_count_fuse_req(req, in_args_len, out_args_len,
 				      flat_argbuf);
 	if (total_sgs > ARRAY_SIZE(stack_sgs)) {
-		sgs = kmalloc_array(total_sgs, sizeof(sgs[0]), GFP_ATOMIC);
-		sg = kmalloc_array(total_sgs, sizeof(sg[0]), GFP_ATOMIC);
+		sgs = kmalloc_array(total_sgs, sizeof(sgs[0]), gfp);
+		sg = kmalloc_array(total_sgs, sizeof(sg[0]), gfp);
 		if (!sgs || !sg) {
 			ret = -ENOMEM;
 			goto out;
@@ -1373,8 +1379,8 @@ static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
 	}
 
 	/* Use a bounce buffer since stack args cannot be mapped */
-	req->argbuf = virtio_fs_argbuf_new(in_args_len, out_args_len,
-					   GFP_ATOMIC, flat_argbuf);
+	req->argbuf = virtio_fs_argbuf_new(in_args_len, out_args_len, gfp,
+					   flat_argbuf);
 	if (!req->argbuf) {
 		ret = -ENOMEM;
 		goto out;
@@ -1473,7 +1479,7 @@ __releases(fiq->lock)
 			  fuse_len_args(req->args->out_numargs,
 					req->args->out_args));
 	fsvq = &fs->vqs[queue_id];
-	ret = virtio_fs_enqueue_req(fsvq, req, false);
+	ret = virtio_fs_enqueue_req(fsvq, req, false, GFP_ATOMIC);
 	if (ret < 0) {
 		if (ret == -ENOMEM || ret == -ENOSPC) {
 			/*
-- 
2.29.2
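
For readers unfamiliar with the scoped-allocation API, below is a
minimal sketch of the memalloc_nofs_{save|restore} pattern the patch
relies on. dispatch_one() and do_enqueue() are hypothetical stand-ins
for virtio_fs_request_dispatch_work() and virtio_fs_enqueue_req(); the
scope helpers themselves come from <linux/sched/mm.h>.

#include <linux/sched/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Hypothetical stand-in for virtio_fs_enqueue_req(): allocates with
 * whatever gfp mask the caller selected.
 */
static int do_enqueue(unsigned int total_sgs, gfp_t gfp)
{
	struct scatterlist *sg;

	/*
	 * Inside a NOFS scope, a GFP_KERNEL allocation may still sleep
	 * and reclaim, but the allocator implicitly clears __GFP_FS,
	 * so reclaim cannot recurse back into filesystem code.
	 */
	sg = kmalloc_array(total_sgs, sizeof(*sg), gfp);
	if (!sg)
		return -ENOMEM;
	/* ... fill the sg array and submit the request ... */
	kfree(sg);
	return 0;
}

/* Hypothetical stand-in for the kworker dispatch path. */
static int dispatch_one(unsigned int total_sgs)
{
	unsigned int flags;
	int ret;

	flags = memalloc_nofs_save();	/* enter NOFS scope */
	ret = do_enqueue(total_sgs, GFP_KERNEL);
	memalloc_nofs_restore(flags);	/* leave NOFS scope */
	return ret;
}

This scoping is also why virtio_fs_enqueue_req() can keep a single
gfp_t parameter: the request-queue callback, which the last hunk shows
is annotated __releases(fiq->lock), keeps passing GFP_ATOMIC, while
the kworker path gets sleeping NOFS allocations without GFP_NOFS being
threaded through every callee.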