kernfs_drain() is updated to handle whether draining is necessary or not
rather than its caller. __kernfs_remove() now always calls kernfs_drain()
instead of filtering based on KERNFS_ACTIVATED. kernfs_drain() now tests
kn->active and kernfs_should_drain_open_files() to determine whether
draining is necessary at all. If not, it returns %false without doing
anything. Otherwise, it unlocks kernfs_rwsem and drains as before and
returns %true. The return value isn't used yet.

Using the precise conditions allows skipping more aggressively. This isn't
a meaningful optimization on its own but will enable future stand-alone
kernfs_deactivate() implementation.

While at it, make minor comment updates.

Signed-off-by: Tejun Heo <tj@xxxxxxxxxx>
---
 fs/kernfs/dir.c | 34 ++++++++++++++++++++--------------
 1 file changed, 20 insertions(+), 14 deletions(-)

diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
index 8ae44db920d4..f857731598cd 100644
--- a/fs/kernfs/dir.c
+++ b/fs/kernfs/dir.c
@@ -460,10 +460,14 @@ void kernfs_put_active(struct kernfs_node *kn)
  * @kn: kernfs_node to drain
  *
  * Drain existing usages and nuke all existing mmaps of @kn. Mutiple
- * removers may invoke this function concurrently on @kn and all will
+ * callers may invoke this function concurrently on @kn and all will
  * return after draining is complete.
+ *
+ * RETURNS:
+ * %false if nothing needed to be drained; otherwise, %true. On %true return,
+ * kernfs_rwsem has been released and re-acquired.
  */
-static void kernfs_drain(struct kernfs_node *kn)
+static bool kernfs_drain(struct kernfs_node *kn)
 	__releases(&kernfs_root(kn)->kernfs_rwsem)
 	__acquires(&kernfs_root(kn)->kernfs_rwsem)
 {
@@ -472,6 +476,16 @@ static void kernfs_drain(struct kernfs_node *kn)
 	lockdep_assert_held_write(&root->kernfs_rwsem);
 	WARN_ON_ONCE(kernfs_active(kn));
 
+	/*
+	 * Skip draining if already fully drained. This avoids draining and its
+	 * lockdep annotations for nodes which have never been activated
+	 * allowing embedding kernfs_remove() in create error paths without
+	 * worrying about draining.
+	 */
+	if (atomic_read(&kn->active) == KN_DEACTIVATED_BIAS &&
+	    !kernfs_should_drain_open_files(kn))
+		return false;
+
 	up_write(&root->kernfs_rwsem);
 
 	if (kernfs_lockdep(kn)) {
@@ -480,7 +494,6 @@ static void kernfs_drain(struct kernfs_node *kn)
 			lock_contended(&kn->dep_map, _RET_IP_);
 	}
 
-	/* but everyone should wait for draining */
 	wait_event(root->deactivate_waitq,
 		   atomic_read(&kn->active) == KN_DEACTIVATED_BIAS);
 
@@ -493,6 +506,8 @@ static void kernfs_drain(struct kernfs_node *kn)
 	kernfs_drain_open_files(kn);
 
 	down_write(&root->kernfs_rwsem);
+
+	return true;
 }
 
 /**
@@ -1370,23 +1385,14 @@ static void __kernfs_remove(struct kernfs_node *kn)
 		pos = kernfs_leftmost_descendant(kn);
 
 		/*
-		 * kernfs_drain() drops kernfs_rwsem temporarily and @pos's
+		 * kernfs_drain() may drop kernfs_rwsem temporarily and @pos's
 		 * base ref could have been put by someone else by the time
 		 * the function returns. Make sure it doesn't go away
 		 * underneath us.
 		 */
		kernfs_get(pos);
 
-		/*
-		 * Drain iff @kn was activated. This avoids draining and
-		 * its lockdep annotations for nodes which have never been
-		 * activated and allows embedding kernfs_remove() in create
-		 * error paths without worrying about draining.
-		 */
-		if (kn->flags & KERNFS_ACTIVATED)
-			kernfs_drain(pos);
-		else
-			WARN_ON_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS);
+		kernfs_drain(pos);
 
 		/*
 		 * kernfs_unlink_sibling() succeeds once per node. Use it
-- 
2.37.2