__blkg_lookup_create() currently fails if radix_tree_preload() fails;
however, preload failure doesn't imply insertion failure.  Don't fail
__blkg_lookup_create() on preload failure.

While at it, drop the sparse locking annotation, which no longer applies.

Signed-off-by: Tejun Heo <tj@xxxxxxxxxx>
---
 block/blk-cgroup.c |   13 +++++--------
 1 files changed, 5 insertions(+), 8 deletions(-)

diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 4ab7420..197fb50 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -177,9 +177,9 @@ EXPORT_SYMBOL_GPL(blkg_lookup);
 
 static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
 					     struct request_queue *q)
-	__releases(q->queue_lock) __acquires(q->queue_lock)
 {
 	struct blkcg_gq *blkg;
+	bool preloaded;
 	int ret;
 
 	WARN_ON_ONCE(!rcu_read_lock_held());
@@ -203,9 +203,7 @@ static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
 		goto err_put;
 
 	/* insert */
-	ret = radix_tree_preload(GFP_ATOMIC);
-	if (ret)
-		goto err_free;
+	preloaded = !radix_tree_preload(GFP_ATOMIC);
 
 	spin_lock(&blkcg->lock);
 	ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
@@ -215,14 +213,13 @@ static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
 	}
 	spin_unlock(&blkcg->lock);
 
-	radix_tree_preload_end();
-
+	if (preloaded)
+		radix_tree_preload_end();
 	if (!ret)
 		return blkg;
 
-err_free:
-	blkg_free(blkg);
 err_put:
 	css_put(&blkcg->css);
+	blkg_free(blkg);
 	return ERR_PTR(ret);
 }
--
1.7.7.3

_______________________________________________
Containers mailing list
Containers@xxxxxxxxxxxxxxxxxxxxxxxxxx
https://lists.linuxfoundation.org/mailman/listinfo/containers
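
For reference, below is a minimal sketch of the preload-tolerant insertion
pattern the patch switches to, outside of blk-cgroup and with hypothetical
names (example_tree, example_lock, example_insert): radix_tree_preload()
with GFP_ATOMIC may fail without implying that the subsequent
radix_tree_insert() will, so only the insert's return value decides
success, and radix_tree_preload_end() is called only if the preload
actually succeeded.

/*
 * Illustrative sketch only -- example_tree, example_lock and
 * example_insert() are hypothetical names, not blk-cgroup code.
 */
#include <linux/radix-tree.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>

static RADIX_TREE(example_tree, GFP_ATOMIC);
static DEFINE_SPINLOCK(example_lock);

static int example_insert(unsigned long id, void *item)
{
	bool preloaded;
	int ret;

	/*
	 * Preload failure only means no nodes were preallocated;
	 * radix_tree_insert() below may still succeed from the
	 * atomic pools, so don't bail out here.
	 */
	preloaded = !radix_tree_preload(GFP_ATOMIC);

	spin_lock(&example_lock);
	ret = radix_tree_insert(&example_tree, id, item);
	spin_unlock(&example_lock);

	/* Only a successful preload leaves preemption disabled. */
	if (preloaded)
		radix_tree_preload_end();

	return ret;	/* 0 on success, -ENOMEM or -EEXIST otherwise */
}

With this shape, success depends solely on radix_tree_insert()'s return
value; the err_free label becomes unnecessary, which is why the patch
folds blkg_free() into the common error path after css_put().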