Currently, when checking whether a request has timed out, we check the
fpq processing lists, but fuse-over-io-uring has one fpq per core and
256 entries in each processing table. On systems with a large number
of cores, this may be too much overhead.

Instead of checking the fpq processing lists, check ent_w_req_queue
and ent_in_userspace. Entries are now added to these lists with
list_move_tail() instead of list_move(), which keeps each list ordered
by request age so that only the first entry needs to be checked for
expiration.

Signed-off-by: Joanne Koong <joannelkoong@xxxxxxxxx>
---
 fs/fuse/dev.c        |  2 +-
 fs/fuse/dev_uring.c  | 26 +++++++++++++++++++++-----
 fs/fuse/fuse_dev_i.h |  1 -
 3 files changed, 22 insertions(+), 7 deletions(-)

diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 3c03aac480a4..80a11ef4b69a 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -45,7 +45,7 @@ bool fuse_request_expired(struct fuse_conn *fc, struct list_head *list)
 	return time_is_before_jiffies(req->create_time + fc->timeout.req_timeout);
 }
 
-bool fuse_fpq_processing_expired(struct fuse_conn *fc, struct list_head *processing)
+static bool fuse_fpq_processing_expired(struct fuse_conn *fc, struct list_head *processing)
 {
 	int i;
 
diff --git a/fs/fuse/dev_uring.c b/fs/fuse/dev_uring.c
index ab8c26042aa8..50f5b4e32ed5 100644
--- a/fs/fuse/dev_uring.c
+++ b/fs/fuse/dev_uring.c
@@ -140,6 +140,21 @@ void fuse_uring_abort_end_requests(struct fuse_ring *ring)
 	}
 }
 
+static bool ent_list_request_expired(struct fuse_conn *fc, struct list_head *list)
+{
+	struct fuse_ring_ent *ent;
+	struct fuse_req *req;
+
+	ent = list_first_entry_or_null(list, struct fuse_ring_ent, list);
+	if (!ent)
+		return false;
+
+	req = ent->fuse_req;
+
+	return time_is_before_jiffies(req->create_time +
+				      fc->timeout.req_timeout);
+}
+
 bool fuse_uring_request_expired(struct fuse_conn *fc)
 {
 	struct fuse_ring *ring = fc->ring;
@@ -157,7 +172,8 @@ bool fuse_uring_request_expired(struct fuse_conn *fc)
 		spin_lock(&queue->lock);
 		if (fuse_request_expired(fc, &queue->fuse_req_queue) ||
 		    fuse_request_expired(fc, &queue->fuse_req_bg_queue) ||
-		    fuse_fpq_processing_expired(fc, queue->fpq.processing)) {
+		    ent_list_request_expired(fc, &queue->ent_w_req_queue) ||
+		    ent_list_request_expired(fc, &queue->ent_in_userspace)) {
 			spin_unlock(&queue->lock);
 			return true;
 		}
@@ -495,7 +511,7 @@ static void fuse_uring_cancel(struct io_uring_cmd *cmd,
 	spin_lock(&queue->lock);
 	if (ent->state == FRRS_AVAILABLE) {
 		ent->state = FRRS_USERSPACE;
-		list_move(&ent->list, &queue->ent_in_userspace);
+		list_move_tail(&ent->list, &queue->ent_in_userspace);
 		need_cmd_done = true;
 		ent->cmd = NULL;
 	}
@@ -715,7 +731,7 @@ static int fuse_uring_send_next_to_ring(struct fuse_ring_ent *ent,
 	cmd = ent->cmd;
 	ent->cmd = NULL;
 	ent->state = FRRS_USERSPACE;
-	list_move(&ent->list, &queue->ent_in_userspace);
+	list_move_tail(&ent->list, &queue->ent_in_userspace);
 	spin_unlock(&queue->lock);
 
 	io_uring_cmd_done(cmd, 0, 0, issue_flags);
@@ -769,7 +785,7 @@ static void fuse_uring_add_req_to_ring_ent(struct fuse_ring_ent *ent,
 	spin_unlock(&fiq->lock);
 	ent->fuse_req = req;
 	ent->state = FRRS_FUSE_REQ;
-	list_move(&ent->list, &queue->ent_w_req_queue);
+	list_move_tail(&ent->list, &queue->ent_w_req_queue);
 	fuse_uring_add_to_pq(ent, req);
 }
 
@@ -1185,7 +1201,7 @@ static void fuse_uring_send(struct fuse_ring_ent *ent, struct io_uring_cmd *cmd,
 
 	spin_lock(&queue->lock);
 	ent->state = FRRS_USERSPACE;
-	list_move(&ent->list, &queue->ent_in_userspace);
+	list_move_tail(&ent->list, &queue->ent_in_userspace);
 	ent->cmd = NULL;
 	spin_unlock(&queue->lock);
 
diff --git a/fs/fuse/fuse_dev_i.h b/fs/fuse/fuse_dev_i.h
index 3c4ae4d52b6f..19c29c6000a7 100644
--- a/fs/fuse/fuse_dev_i.h
+++ b/fs/fuse/fuse_dev_i.h
@@ -63,7 +63,6 @@ void fuse_dev_queue_forget(struct fuse_iqueue *fiq,
 			   struct fuse_forget_link *forget);
 void fuse_dev_queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req);
 bool fuse_request_expired(struct fuse_conn *fc, struct list_head *list);
-bool fuse_fpq_processing_expired(struct fuse_conn *fc, struct list_head *processing);
 
 #endif
 
-- 
2.43.5
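
For reviewers who want the idea in isolation: below is a small standalone
userspace sketch, not kernel code, of the invariant the patch relies on.
Entries are always appended at the tail in creation order, so an expiration
scan only has to look at the head of each list. The names (entry, queue,
queue_add_tail, queue_expired) are made up for illustration and do not
correspond to anything in the kernel tree.

/* expire_sketch.c - illustration only, not part of the patch. */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct entry {
	time_t create_time;
	struct entry *next;
};

struct queue {
	struct entry *head;
	struct entry *tail;
};

/* Append at the tail, keeping the queue ordered by creation time,
 * analogous to the switch from list_move() to list_move_tail(). */
static void queue_add_tail(struct queue *q, struct entry *e)
{
	e->next = NULL;
	if (q->tail)
		q->tail->next = e;
	else
		q->head = e;
	q->tail = e;
}

/* O(1) expiration check: if the oldest entry (the head) has not timed
 * out, nothing behind it has either, so only the head is inspected. */
static bool queue_expired(const struct queue *q, time_t now, time_t timeout)
{
	if (!q->head)
		return false;
	return now - q->head->create_time > timeout;
}

int main(void)
{
	struct queue q = { NULL, NULL };
	struct entry old = { .create_time = time(NULL) - 120 };
	struct entry fresh = { .create_time = time(NULL) };

	queue_add_tail(&q, &old);
	queue_add_tail(&q, &fresh);

	/* With a 60-second timeout the head (oldest) entry has expired. */
	printf("expired: %d\n", queue_expired(&q, time(NULL), 60));
	return 0;
}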