This patch makes it possible to call aio_complete() from a cancel callback. Previously this was impossible because aio_complete() needs to take ctx->ctx_lock, which is always held before kiocb_cancel() is called, so there was no way to complete the request properly. Now the spinlock is released before the cancel callback is invoked, so spinlock recursion cannot occur if the callback calls aio_complete(); the spinlock is re-acquired after the callback returns. A __must_hold() annotation is also added to kiocb_cancel() to enable sparse spinlock checking. Signed-off-by: Robert Baldyga <r.baldyga@xxxxxxxxxxx> --- This patch was created according to the suggestions of Benjamin LaHaise (https://lkml.org/lkml/2014/1/23/336). fs/aio.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/fs/aio.c b/fs/aio.c index 062a5f6..6d5cd9e 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -472,8 +472,10 @@ void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel) EXPORT_SYMBOL(kiocb_set_cancel_fn); static int kiocb_cancel(struct kioctx *ctx, struct kiocb *kiocb) + __must_hold(&ctx->ctx_lock) { kiocb_cancel_fn *old, *cancel; + int ret; /* * Don't want to set kiocb->ki_cancel = KIOCB_CANCELLED unless it @@ -489,7 +491,15 @@ static int kiocb_cancel(struct kioctx *ctx, struct kiocb *kiocb) cancel = cmpxchg(&kiocb->ki_cancel, old, KIOCB_CANCELLED); } while (cancel != old); - return cancel(kiocb); + /* + * cancel() function may call aio_complete function, which needs to + * lock ctx->ctx_lock, so we call cancel() with spinlock unlocked + */ + spin_unlock(&ctx->ctx_lock); + ret = cancel(kiocb); + spin_lock(&ctx->ctx_lock); + + return ret; } static void free_ioctx(struct work_struct *work) -- 1.7.9.5 -- To unsubscribe from this list: send the line "unsubscribe linux-fsdevel" in the body of a message to majordomo@xxxxxxxxxxxxxxx More majordomo info at http://vger.kernel.org/majordomo-info.html