Re: [PATCH v5 08/20] kthread: Allow to cancel kthread work

Hi Petr,

[auto build test WARNING on soc-thermal/next]
[also build test WARNING on v4.5-rc5 next-20160222]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url:    https://github.com/0day-ci/linux/commits/Petr-Mladek/kthread-Use-kthread-worker-API-more-widely/20160222-230250
base:   https://git.kernel.org/pub/scm/linux/kernel/git/evalenti/linux-soc-thermal next
reproduce: make htmldocs

All warnings (new ones prefixed by >>):

   include/linux/init.h:1: warning: no structured comments found
   kernel/kthread.c:860: warning: No description found for parameter 'dwork'
   kernel/kthread.c:860: warning: No description found for parameter 'delay'
   kernel/kthread.c:860: warning: Excess function parameter 'work' description in 'queue_delayed_kthread_work'
>> kernel/kthread.c:1012: warning: bad line: 
   kernel/sys.c:1: warning: no structured comments found
   drivers/dma-buf/seqno-fence.c:1: warning: no structured comments found
   drivers/dma-buf/reservation.c:1: warning: no structured comments found
   include/linux/reservation.h:1: warning: no structured comments found
   include/linux/spi/spi.h:540: warning: No description found for parameter 'max_transfer_size'
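
Note: the three kthread.c:860 warnings all point at the kernel-doc header above
queue_delayed_kthread_work(). The function takes @dwork and @delay, but the
comment still describes a @work parameter and never documents the new ones.
A minimal sketch of a header that would satisfy kernel-doc (the parameter
wording below is illustrative, not taken from the patch):

	/**
	 * queue_delayed_kthread_work - queue the associated kthread work
	 *	after a delay
	 * @worker: target kthread_worker
	 * @dwork: delayed kthread work to queue
	 * @delay: number of jiffies to wait before queuing
	 *
	 * ... (existing description and Return: paragraph unchanged) ...
	 */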

vim +1012 kernel/kthread.c

   854	 * either the timer was running or the work was queued. It returns %true
   855	 * otherwise.
   856	 */
   857	bool queue_delayed_kthread_work(struct kthread_worker *worker,
   858					struct delayed_kthread_work *dwork,
   859					unsigned long delay)
 > 860	{
   861		struct kthread_work *work = &dwork->work;
   862		unsigned long flags;
   863		bool ret = false;
   864	
   865		spin_lock_irqsave(&worker->lock, flags);
   866	
   867		if (!queuing_blocked(work)) {
   868			__queue_delayed_kthread_work(worker, dwork, delay);
   869			ret = true;
   870		}
   871	
   872		spin_unlock_irqrestore(&worker->lock, flags);
   873		return ret;
   874	}
   875	EXPORT_SYMBOL_GPL(queue_delayed_kthread_work);
   876	
   877	struct kthread_flush_work {
   878		struct kthread_work	work;
   879		struct completion	done;
   880	};
   881	
   882	static void kthread_flush_work_fn(struct kthread_work *work)
   883	{
   884		struct kthread_flush_work *fwork =
   885			container_of(work, struct kthread_flush_work, work);
   886		complete(&fwork->done);
   887	}
   888	
   889	/**
   890	 * flush_kthread_work - flush a kthread_work
   891	 * @work: work to flush
   892	 *
   893	 * If @work is queued or executing, wait for it to finish execution.
   894	 */
   895	void flush_kthread_work(struct kthread_work *work)
   896	{
   897		struct kthread_flush_work fwork = {
   898			KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
   899			COMPLETION_INITIALIZER_ONSTACK(fwork.done),
   900		};
   901		struct kthread_worker *worker;
   902		bool noop = false;
   903	
   904		worker = work->worker;
   905		if (!worker)
   906			return;
   907	
   908		spin_lock_irq(&worker->lock);
   909		/* Work must not be used with more than one worker, see queue_kthread_work(). */
   910		WARN_ON_ONCE(work->worker != worker);
   911	
   912		if (!list_empty(&work->node))
   913			insert_kthread_work(worker, &fwork.work, work->node.next);
   914		else if (worker->current_work == work)
   915			insert_kthread_work(worker, &fwork.work, worker->work_list.next);
   916		else
   917			noop = true;
   918	
   919		spin_unlock_irq(&worker->lock);
   920	
   921		if (!noop)
   922			wait_for_completion(&fwork.done);
   923	}
   924	EXPORT_SYMBOL_GPL(flush_kthread_work);
   925	
   926	/*
   927	 * This function removes the work from the worker queue. Also it makes sure
   928	 * that it won't get queued later via the delayed work's timer.
   929	 *
   930	 * The work might still be in use when this function finishes. See the
   931	 * current_work processed by the worker.
   932	 *
   933	 * Return: %true if @work was pending and successfully canceled,
   934	 *	%false if @work was not pending
   935	 */
   936	static bool __cancel_kthread_work(struct kthread_work *work, bool is_dwork)
   937	{
   938		/* Try to cancel the timer if it exists. */
   939		if (is_dwork) {
   940			struct delayed_kthread_work *dwork =
   941				container_of(work, struct delayed_kthread_work, work);
   942	
   943			del_timer_sync(&dwork->timer);
   944		}
   945	
   946		/*
   947		 * Try to remove the work from a worker list. It might either
   948		 * be from worker->work_list or from worker->delayed_work_list.
   949		 *
   950		 * Note that the work is still in the delayed list when del_timer_sync()
   951		 * raced with the timer callback. In this case the callback was not able
   952		 * to take the lock and move the work to the normal list.
   953		 */
   954		if (!list_empty(&work->node)) {
   955			list_del_init(&work->node);
   956			return true;
   957		}
   958	
   959		return false;
   960	}
   961	
   962	static bool __cancel_kthread_work_sync(struct kthread_work *work, bool is_dwork)
   963	{
   964		struct kthread_worker *worker = work->worker;
   965		unsigned long flags;
   966		int ret = false;
   967	
   968		if (!worker)
   969			goto out;
   970	
   971		spin_lock_irqsave(&worker->lock, flags);
   972		/* Work must not be used with more than one worker, see queue_kthread_work(). */
   973		WARN_ON_ONCE(worker != work->worker);
   974	
   975		/*
   976		 * work->canceling has two functions here. It blocks queueing until
   977		 * the cancel operation is complete. Also it tells the timer callback
   978		 * that it cannot take the worker lock. It prevents a deadlock between
   979		 * the callback and del_timer_sync().
   980		 */
   981		work->canceling++;
   982		ret = __cancel_kthread_work(work, is_dwork);
   983	
   984		if (worker->current_work != work)
   985			goto out_fast;
   986	
   987		spin_unlock_irqrestore(&worker->lock, flags);
   988		flush_kthread_work(work);
   989		/*
   990		 * Nobody is allowed to switch the worker or queue the work
   991		 * when .canceling is set.
   992		 */
   993		spin_lock_irqsave(&worker->lock, flags);
   994	
   995	out_fast:
   996		work->canceling--;
   997		spin_unlock_irqrestore(&worker->lock, flags);
   998	out:
   999		return ret;
  1000	}
  1001	
  1002	/**
  1003	 * cancel_kthread_work_sync - cancel a kthread work and wait for it to finish
  1004	 * @work: the kthread work to cancel
  1005	 *
  1006	 * Cancel @work and wait for its execution to finish.  This function
  1007	 * can be used even if the work re-queues itself. On return from this
  1008	 * function, @work is guaranteed to be not pending or executing on any CPU.
  1009	 *
  1010	 * cancel_kthread_work_sync(&delayed_work->work) must not be used for
  1011	 * delayed_work's. Use cancel_delayed_kthread_work_sync() instead.
> 1012	
  1013	 * The caller must ensure that the worker on which @work was last
  1014	 * queued can't be destroyed before this function returns.
  1015	 *
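
The flagged "bad line" at kthread.c:1012 is the line marked with ">" above: a
continuation line inside the cancel_kthread_work_sync() kernel-doc that is
missing its leading " *", so kernel-doc cannot parse it. A sketch of the fixed
comment, assuming only the missing " *" is added and the surrounding text
stays as is:

	 * cancel_kthread_work_sync(&delayed_work->work) must not be used for
	 * delayed_work's. Use cancel_delayed_kthread_work_sync() instead.
	 *
	 * The caller must ensure that the worker on which @work was last
	 * queued can't be destroyed before this function returns.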

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation

Attachment: .config.gz
Description: Binary data

