No need to use a timer for freeing idle loop workers: a delayed work
item (dwork) does the job just as well, and since the dwork callback
runs in process context like every other user of the lock, we no longer
have to disable interrupts when acquiring ->lo_work_lock.

Cc: Michal Koutný <mkoutny@xxxxxxxx>
Cc: Dan Schatzberg <schatzberg.dan@xxxxxxxxx>
Signed-off-by: Ming Lei <ming.lei@xxxxxxxxxx>
---
 drivers/block/loop.c | 76 ++++++++++++++++++++++----------------------
 drivers/block/loop.h |  2 +-
 2 files changed, 39 insertions(+), 39 deletions(-)

diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 8378b8455f7c..7fa0c70a3ea6 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -929,7 +929,6 @@ struct loop_worker {
 
 static void loop_workfn(struct work_struct *work);
 static void loop_rootcg_workfn(struct work_struct *work);
-static void loop_free_idle_workers(struct timer_list *timer);
 
 #ifdef CONFIG_BLK_CGROUP
 static inline int queue_on_root_worker(struct cgroup_subsys_state *css)
@@ -957,7 +956,7 @@ static void loop_queue_work(struct loop_device *lo, struct loop_cmd *cmd)
 		blkcg_css = &bio_blkcg(rq->bio)->css;
 #endif
 
-	spin_lock_irq(&lo->lo_work_lock);
+	spin_lock(&lo->lo_work_lock);
 
 	if (queue_on_root_worker(blkcg_css))
 		goto queue_work;
@@ -1012,7 +1011,7 @@ static void loop_queue_work(struct loop_device *lo, struct loop_cmd *cmd)
 	}
 	list_add_tail(&cmd->list_entry, cmd_list);
 	queue_work(lo->workqueue, work);
-	spin_unlock_irq(&lo->lo_work_lock);
+	spin_unlock(&lo->lo_work_lock);
 }
 
 static void loop_update_rotational(struct loop_device *lo)
@@ -1134,6 +1133,34 @@ loop_set_status_from_info(struct loop_device *lo,
 	return 0;
 }
 
+static void loop_set_timer(struct loop_device *lo)
+{
+	schedule_delayed_work(&lo->idle_work, LOOP_IDLE_WORKER_TIMEOUT);
+}
+
+static void loop_free_idle_workers(struct work_struct *work)
+{
+	struct loop_device *lo = container_of(work, struct loop_device,
+			idle_work.work);
+	struct loop_worker *pos, *worker;
+
+	spin_lock(&lo->lo_work_lock);
+	list_for_each_entry_safe(worker, pos, &lo->idle_worker_list,
+				idle_list) {
+		if (time_is_after_jiffies(worker->last_ran_at +
+						LOOP_IDLE_WORKER_TIMEOUT))
+			break;
+		list_del(&worker->idle_list);
+		rb_erase(&worker->rb_node, &lo->worker_tree);
+		css_put(worker->blkcg_css);
+		kfree(worker);
+	}
+	if (!list_empty(&lo->idle_worker_list))
+		loop_set_timer(lo);
+	spin_unlock(&lo->lo_work_lock);
+}
+
+
 static int loop_configure(struct loop_device *lo, fmode_t mode,
 			  struct block_device *bdev,
 			  const struct loop_config *config)
@@ -1213,8 +1240,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
 	INIT_LIST_HEAD(&lo->rootcg_cmd_list);
 	INIT_LIST_HEAD(&lo->idle_worker_list);
 	lo->worker_tree = RB_ROOT;
-	timer_setup(&lo->timer, loop_free_idle_workers,
-		TIMER_DEFERRABLE);
+	INIT_DELAYED_WORK(&lo->idle_work, loop_free_idle_workers);
 	lo->use_dio = lo->lo_flags & LO_FLAGS_DIRECT_IO;
 	lo->lo_device = bdev;
 	lo->lo_backing_file = file;
@@ -1304,7 +1330,7 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
 	blk_mq_freeze_queue(lo->lo_queue);
 
 	destroy_workqueue(lo->workqueue);
-	spin_lock_irq(&lo->lo_work_lock);
+	spin_lock(&lo->lo_work_lock);
 	list_for_each_entry_safe(worker, pos, &lo->idle_worker_list,
 				idle_list) {
 		list_del(&worker->idle_list);
@@ -1312,8 +1338,8 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
 		css_put(worker->blkcg_css);
 		kfree(worker);
 	}
-	spin_unlock_irq(&lo->lo_work_lock);
-	del_timer_sync(&lo->timer);
+	spin_unlock(&lo->lo_work_lock);
+	cancel_delayed_work_sync(&lo->idle_work);
 
 	spin_lock_irq(&lo->lo_lock);
 	lo->lo_backing_file = NULL;
@@ -2133,11 +2159,6 @@ static void loop_handle_cmd(struct loop_cmd *cmd)
 	}
 }
 
-static void loop_set_timer(struct loop_device *lo)
-{
-	timer_reduce(&lo->timer, jiffies + LOOP_IDLE_WORKER_TIMEOUT);
-}
-
 static void loop_process_work(struct loop_worker *worker,
 			struct list_head *cmd_list, struct loop_device *lo)
 {
@@ -2145,17 +2166,17 @@ static void loop_process_work(struct loop_worker *worker,
 	struct loop_cmd *cmd;
 
 	current->flags |= PF_LOCAL_THROTTLE | PF_MEMALLOC_NOIO;
-	spin_lock_irq(&lo->lo_work_lock);
+	spin_lock(&lo->lo_work_lock);
 	while (!list_empty(cmd_list)) {
 		cmd = container_of(
 			cmd_list->next, struct loop_cmd, list_entry);
 		list_del(cmd_list->next);
-		spin_unlock_irq(&lo->lo_work_lock);
+		spin_unlock(&lo->lo_work_lock);
 
 		loop_handle_cmd(cmd);
 		cond_resched();
 
-		spin_lock_irq(&lo->lo_work_lock);
+		spin_lock(&lo->lo_work_lock);
 	}
 
 	/*
@@ -2168,7 +2189,7 @@ static void loop_process_work(struct loop_worker *worker,
 		list_add_tail(&worker->idle_list, &lo->idle_worker_list);
 		loop_set_timer(lo);
 	}
-	spin_unlock_irq(&lo->lo_work_lock);
+	spin_unlock(&lo->lo_work_lock);
 	current->flags = orig_flags;
 }
 
@@ -2204,27 +2225,6 @@ static void loop_rootcg_workfn(struct work_struct *work)
 	loop_process_work(NULL, &lo->rootcg_cmd_list, lo);
 }
 
-static void loop_free_idle_workers(struct timer_list *timer)
-{
-	struct loop_device *lo = container_of(timer, struct loop_device, timer);
-	struct loop_worker *pos, *worker;
-
-	spin_lock_irq(&lo->lo_work_lock);
-	list_for_each_entry_safe(worker, pos, &lo->idle_worker_list,
-				idle_list) {
-		if (time_is_after_jiffies(worker->last_ran_at +
-						LOOP_IDLE_WORKER_TIMEOUT))
-			break;
-		list_del(&worker->idle_list);
-		rb_erase(&worker->rb_node, &lo->worker_tree);
-		css_put(worker->blkcg_css);
-		kfree(worker);
-	}
-	if (!list_empty(&lo->idle_worker_list))
-		loop_set_timer(lo);
-	spin_unlock_irq(&lo->lo_work_lock);
-}
-
 static const struct blk_mq_ops loop_mq_ops = {
 	.queue_rq       = loop_queue_rq,
 	.complete	= lo_complete_rq,
diff --git a/drivers/block/loop.h b/drivers/block/loop.h
index a52a3fd89457..9df889af1bcf 100644
--- a/drivers/block/loop.h
+++ b/drivers/block/loop.h
@@ -60,7 +60,7 @@ struct loop_device {
 	struct list_head rootcg_cmd_list;
 	struct list_head idle_worker_list;
 	struct rb_root worker_tree;
-	struct timer_list timer;
+	struct delayed_work idle_work;
 	bool use_dio;
 	bool sysfs_inited;
 
-- 
2.31.1
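
For reference, a minimal, self-contained sketch of the delayed-work
pattern the driver switches to (illustrative only, not part of the
patch): a self-re-arming dwork whose callback runs in process context,
so a plain spin_lock() is enough, torn down with
cancel_delayed_work_sync(). All demo_* names and the 60-second period
are invented for the example; the workqueue APIs are the same ones the
patch uses.

/* Illustrative sketch only; demo_* identifiers are made up. */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>

#define DEMO_IDLE_TIMEOUT	(60 * HZ)

static struct delayed_work demo_idle_work;
static DEFINE_SPINLOCK(demo_lock);
static bool demo_keep_running = true;

static void demo_reap(struct work_struct *work)
{
	/* Work items run in process context, so no spin_lock_irq() needed. */
	spin_lock(&demo_lock);
	if (demo_keep_running)
		schedule_delayed_work(&demo_idle_work, DEMO_IDLE_TIMEOUT);
	spin_unlock(&demo_lock);
}

static int __init demo_init(void)
{
	INIT_DELAYED_WORK(&demo_idle_work, demo_reap);
	schedule_delayed_work(&demo_idle_work, DEMO_IDLE_TIMEOUT);
	return 0;
}

static void __exit demo_exit(void)
{
	/* Stop re-arming, then wait out any pending/running instance. */
	spin_lock(&demo_lock);
	demo_keep_running = false;
	spin_unlock(&demo_lock);
	cancel_delayed_work_sync(&demo_idle_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

This mirrors how loop_free_idle_workers() is driven after the change:
armed via loop_set_timer(), re-armed while idle workers remain, and
cancelled in __loop_clr_fd() with cancel_delayed_work_sync().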