Similarly to commit 457e490f2b741 ("blkcg: allocate struct blkcg_gq outside request queue spinlock"), blkg_create can also trigger occasional -ENOMEM failures at the radix insertion because any allocation inside blkg_create has to be non-blocking, making it more likely to fail. This causes trouble for userspace tools trying to configure io weights, which need to deal with this condition.

This patch reduces the occurrence of -ENOMEMs on this path by preloading the radix tree element in a GFP_KERNEL context, such that we guarantee the later non-blocking insertion won't fail.

A similar solution exists in blkcg_init_queue for the same situation.

Signed-off-by: Gabriel Krisman Bertazi <krisman@xxxxxxxxxxxxx>
---
 block/blk-cgroup.c | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index f9b55614d67d..bbf848604089 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -612,6 +612,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 	struct gendisk *disk;
 	struct request_queue *q;
 	struct blkcg_gq *blkg;
+	bool preloaded = false;
 	int ret;
 
 	disk = blkcg_conf_get_disk(&input);
@@ -657,6 +658,8 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 			goto fail;
 		}
 
+		preloaded = !radix_tree_preload(GFP_KERNEL);
+
 		rcu_read_lock();
 		spin_lock_irq(&q->queue_lock);
 
@@ -676,6 +679,9 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 			}
 		}
 
+		if (preloaded)
+			radix_tree_preload_end();
+
 		if (pos == blkcg)
 			goto success;
 	}
@@ -688,6 +694,8 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 fail_unlock:
 	spin_unlock_irq(&q->queue_lock);
 	rcu_read_unlock();
+	if (preloaded)
+		radix_tree_preload_end();
 fail:
 	put_disk_and_module(disk);
 	/*
-- 
2.28.0
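
For context, the fix follows the standard radix tree preload pattern already used by blkcg_init_queue: pre-allocate tree nodes from a context that may sleep, then perform the insertion under the spinlock where allocation must not block. A minimal self-contained sketch of that pattern (my_tree, my_lock and insert_item are hypothetical names for illustration, not part of this patch):

#include <linux/radix-tree.h>
#include <linux/spinlock.h>

static RADIX_TREE(my_tree, GFP_ATOMIC);
static DEFINE_SPINLOCK(my_lock);

static int insert_item(unsigned long index, void *item)
{
	int ret;

	/* May sleep: pre-allocate tree nodes while blocking is still allowed. */
	ret = radix_tree_preload(GFP_KERNEL);
	if (ret)
		return ret;

	spin_lock(&my_lock);
	/* Consumes the preloaded nodes, so -ENOMEM cannot happen here. */
	ret = radix_tree_insert(&my_tree, index, item);
	spin_unlock(&my_lock);

	/* Re-enables the preemption disabled by radix_tree_preload(). */
	radix_tree_preload_end();

	return ret;
}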