[PATCH 5/5] aio: remove dead cancellation code

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



This removes the aio cancel infrastructure now that nothing in-tree is
setting ki_cancel.  It maintains the current behaviour of returning
-EAGAIN on cancel attempts.

The most significant benefit of this is removing the serialization of
the list of active iocbs during submission.  Without that per-context
list manipulation, it will be easier to implement lockless submission.

This has only been compiled.
---
 fs/aio.c            | 121 +++++++---------------------------------------------
 include/linux/aio.h |  18 --------
 2 files changed, 15 insertions(+), 124 deletions(-)

diff --git a/fs/aio.c b/fs/aio.c
index 1de4f78..97e3582 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -247,8 +247,6 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 	spin_lock_init(&ctx->ring_info.ring_lock);
 	init_waitqueue_head(&ctx->wait);
 
-	INIT_LIST_HEAD(&ctx->active_reqs);
-
 	if (aio_setup_ring(ctx) < 0)
 		goto out_freectx;
 
@@ -280,33 +278,13 @@ out_freectx:
 	return ERR_PTR(err);
 }
 
-/* kill_ctx
- *	Cancels all outstanding aio requests on an aio context.  Used 
- *	when the processes owning a context have all exited to encourage 
- *	the rapid destruction of the kioctx.
- */
-static void kill_ctx(struct kioctx *ctx)
+static void wait_for_active_reqs(struct kioctx *ctx)
 {
-	int (*cancel)(struct kiocb *, struct io_event *);
 	struct task_struct *tsk = current;
 	DECLARE_WAITQUEUE(wait, tsk);
-	struct io_event res;
 
 	spin_lock_irq(&ctx->ctx_lock);
 	ctx->dead = 1;
-	while (!list_empty(&ctx->active_reqs)) {
-		struct list_head *pos = ctx->active_reqs.next;
-		struct kiocb *iocb = list_kiocb(pos);
-		list_del_init(&iocb->ki_list);
-		cancel = iocb->ki_cancel;
-		kiocbSetCancelled(iocb);
-		if (cancel) {
-			iocb->ki_users++;
-			spin_unlock_irq(&ctx->ctx_lock);
-			cancel(iocb, &res);
-			spin_lock_irq(&ctx->ctx_lock);
-		}
-	}
 
 	if (!ctx->reqs_active)
 		goto out;
@@ -357,7 +335,7 @@ void exit_aio(struct mm_struct *mm)
 		ctx = hlist_entry(mm->ioctx_list.first, struct kioctx, list);
 		hlist_del_rcu(&ctx->list);
 
-		kill_ctx(ctx);
+		wait_for_active_reqs(ctx);
 
 		if (1 != atomic_read(&ctx->users))
 			printk(KERN_DEBUG
@@ -395,11 +373,9 @@ static struct kiocb *__aio_get_req(struct kioctx *ctx)
 	if (unlikely(!req))
 		return NULL;
 
-	req->ki_flags = 0;
 	req->ki_users = 2;
 	req->ki_key = 0;
 	req->ki_ctx = ctx;
-	req->ki_cancel = NULL;
 	req->ki_retry = NULL;
 	req->ki_dtor = NULL;
 	req->private = NULL;
@@ -435,7 +411,6 @@ static void kiocb_batch_free(struct kioctx *ctx, struct kiocb_batch *batch)
 	spin_lock_irq(&ctx->ctx_lock);
 	list_for_each_entry_safe(req, n, &batch->head, ki_batch) {
 		list_del(&req->ki_batch);
-		list_del(&req->ki_list);
 		kmem_cache_free(kiocb_cachep, req);
 		ctx->reqs_active--;
 	}
@@ -483,10 +458,7 @@ static int kiocb_batch_refill(struct kioctx *ctx, struct kiocb_batch *batch)
 	}
 
 	batch->count -= allocated;
-	list_for_each_entry(req, &batch->head, ki_batch) {
-		list_add(&req->ki_list, &ctx->active_reqs);
-		ctx->reqs_active++;
-	}
+	ctx->reqs_active += allocated;
 
 	kunmap_atomic(ring);
 	spin_unlock_irq(&ctx->ctx_lock);
@@ -539,8 +511,6 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
 	BUG_ON(req->ki_users < 0);
 	if (likely(req->ki_users))
 		return 0;
-	list_del(&req->ki_list);		/* remove from active_reqs */
-	req->ki_cancel = NULL;
 	req->ki_retry = NULL;
 
 	fput(req->ki_filp);
@@ -629,13 +599,6 @@ int aio_complete(struct kiocb *iocb, long res, long res2)
 	 */
 	spin_lock_irqsave(&ctx->ctx_lock, flags);
 
-	/*
-	 * cancelled requests don't get events, userland was given one
-	 * when the event got cancelled.
-	 */
-	if (kiocbIsCancelled(iocb))
-		goto put_rq;
-
 	ring = kmap_atomic(info->ring_pages[0]);
 
 	tail = info->tail;
@@ -673,7 +636,6 @@ int aio_complete(struct kiocb *iocb, long res, long res2)
 	if (iocb->ki_eventfd != NULL)
 		eventfd_signal(iocb->ki_eventfd, 1);
 
-put_rq:
 	/* everything turned out well, dispose of the aiocb. */
 	ret = __aio_put_req(ctx, iocb);
 
@@ -898,7 +860,7 @@ static void io_destroy(struct kioctx *ioctx)
 	if (likely(!was_dead))
 		put_ioctx(ioctx);	/* twice for the list */
 
-	kill_ctx(ioctx);
+	wait_for_active_reqs(ioctx);
 
 	/*
 	 * Wake up any waiters.  The setting of ctx->dead must be seen
@@ -1284,11 +1246,7 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 	if (ret)
 		goto out_put_req;
 
-	if (unlikely(kiocbIsCancelled(req))) {
-		ret = -EINTR;
-	} else {
-		ret = req->ki_retry(req);
-	}
+	ret = req->ki_retry(req);
 	if (ret != -EIOCBQUEUED) {
 		/*
 		 * There's no easy way to restart the syscall since other AIO's
@@ -1384,41 +1342,21 @@ SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
 	return do_io_submit(ctx_id, nr, iocbpp, 0);
 }
 
-/* lookup_kiocb
- *	Finds a given iocb for cancellation.
- */
-static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb,
-				  u32 key)
-{
-	struct list_head *pos;
-
-	assert_spin_locked(&ctx->ctx_lock);
-
-	/* TODO: use a hash or array, this sucks. */
-	list_for_each(pos, &ctx->active_reqs) {
-		struct kiocb *kiocb = list_kiocb(pos);
-		if (kiocb->ki_obj.user == iocb && kiocb->ki_key == key)
-			return kiocb;
-	}
-	return NULL;
-}
-
 /* sys_io_cancel:
- *	Attempts to cancel an iocb previously passed to io_submit.  If
- *	the operation is successfully cancelled, the resulting event is
- *	copied into the memory pointed to by result without being placed
- *	into the completion queue and 0 is returned.  May fail with
- *	-EFAULT if any of the data structures pointed to are invalid.
- *	May fail with -EINVAL if aio_context specified by ctx_id is
- *	invalid.  May fail with -EAGAIN if the iocb specified was not
- *	cancelled.  Will fail with -ENOSYS if not implemented.
+ *	AIO cancellation has been deprecated for lack of use.  It was
+ *	only slowing down the fast paths.  This remaining implementation
+ *	behaves as cancellation did for all iocbs that never supported
+ *	cancellation to begin with.
+ *
+ *	May fail with -EFAULT if any of the data structures pointed to
+ *	are invalid.  May fail with -EINVAL if aio_context specified by
+ *	ctx_id is invalid.  May fail with -EAGAIN if the iocb specified
+ *	was not cancelled.  Will fail with -ENOSYS if not implemented.
  */
 SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
 		struct io_event __user *, result)
 {
-	int (*cancel)(struct kiocb *iocb, struct io_event *res);
 	struct kioctx *ctx;
-	struct kiocb *kiocb;
 	u32 key;
 	int ret;
 
@@ -1430,37 +1368,8 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
 	if (unlikely(!ctx))
 		return -EINVAL;
 
-	spin_lock_irq(&ctx->ctx_lock);
-	ret = -EAGAIN;
-	kiocb = lookup_kiocb(ctx, iocb, key);
-	if (kiocb && kiocb->ki_cancel) {
-		cancel = kiocb->ki_cancel;
-		kiocb->ki_users ++;
-		kiocbSetCancelled(kiocb);
-	} else
-		cancel = NULL;
-	spin_unlock_irq(&ctx->ctx_lock);
-
-	if (NULL != cancel) {
-		struct io_event tmp;
-		pr_debug("calling cancel\n");
-		memset(&tmp, 0, sizeof(tmp));
-		tmp.obj = (u64)(unsigned long)kiocb->ki_obj.user;
-		tmp.data = kiocb->ki_user_data;
-		ret = cancel(kiocb, &tmp);
-		if (!ret) {
-			/* Cancellation succeeded -- copy the result
-			 * into the user's buffer.
-			 */
-			if (copy_to_user(result, &tmp, sizeof(tmp)))
-				ret = -EFAULT;
-		}
-	} else
-		ret = -EINVAL;
-
 	put_ioctx(ctx);
-
-	return ret;
+	return -EAGAIN;
 }
 
 /* io_getevents:
diff --git a/include/linux/aio.h b/include/linux/aio.h
index 387dad0..1ec0c06 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -13,15 +13,6 @@ struct kioctx;
 
 #define KIOCB_SYNC_KEY		(~0U)
 
-/* ki_flags bits */
-#define KIF_CANCELLED		2
-
-#define kiocbSetCancelled(iocb)	set_bit(KIF_CANCELLED, &(iocb)->ki_flags)
-
-#define kiocbClearCancelled(iocb)	clear_bit(KIF_CANCELLED, &(iocb)->ki_flags)
-
-#define kiocbIsCancelled(iocb)	test_bit(KIF_CANCELLED, &(iocb)->ki_flags)
-
 /* is there a better place to document function pointer methods? */
 /**
  * ki_retry	-	iocb forward progress callback
@@ -49,7 +40,6 @@ struct kioctx;
  */
 struct kiocb {
 	struct list_head	ki_run_list;
-	unsigned long		ki_flags;
 	int			ki_users;
 	unsigned		ki_key;		/* id of this request */
 
@@ -78,8 +68,6 @@ struct kiocb {
  	unsigned long		ki_nr_segs;
  	unsigned long		ki_cur_seg;
 
-	struct list_head	ki_list;	/* the aio core uses this
-						 * for cancellation */
 	struct list_head	ki_batch;	/* batch allocation */
 
 	/*
@@ -155,7 +143,6 @@ struct kioctx {
 	spinlock_t		ctx_lock;
 
 	int			reqs_active;
-	struct list_head	active_reqs;	/* used for cancellation */
 
 	/* sys_io_setup currently limits this to an unsigned int */
 	unsigned		max_reqs;
@@ -185,11 +172,6 @@ static inline long do_io_submit(aio_context_t ctx_id, long nr,
 				bool compat) { return 0; }
 #endif /* CONFIG_AIO */
 
-static inline struct kiocb *list_kiocb(struct list_head *h)
-{
-	return list_entry(h, struct kiocb, ki_list);
-}
-
 /* for sysctl: */
 extern unsigned long aio_nr;
 extern unsigned long aio_max_nr;
-- 
1.7.11.4

--
To unsubscribe from this list: send the line "unsubscribe linux-usb" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[Index of Archives]     [Linux Media]     [Linux Input]     [Linux Audio Users]     [Yosemite News]     [Linux Kernel]     [Linux SCSI]     [Old Linux USB Devel Archive]

  Powered by Linux