From: Kent Overstreet <kmo@xxxxxxxxxxxxx>

This patch changes percpu_ida_alloc() to accept a task state bitmask for
prepare_to_wait(), in order to support interruptible sleep for callers
that require it.

Previously, this code assumed uninterruptible sleep for all cases, thus
preventing signals from being delivered to a scheduled process context
in the tag-stealing slow path.

It now accepts TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE when the
caller is able to sleep waiting for a new tag, or TASK_RUNNING when the
caller cannot sleep and is instead forced to return a negative tag.

Reported-by: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Signed-off-by: Kent Overstreet <kmo@xxxxxxxxxxxxx>
Cc: <stable@xxxxxxxxxxxxxxx> #3.12+
Signed-off-by: Nicholas Bellinger <nab@xxxxxxxxxxxxxxx>
---
 include/linux/percpu_ida.h |  3 ++-
 lib/percpu_ida.c           | 17 +++++++++++------
 2 files changed, 13 insertions(+), 7 deletions(-)

diff --git a/include/linux/percpu_ida.h b/include/linux/percpu_ida.h
index 1900bd0..f5cfdd6 100644
--- a/include/linux/percpu_ida.h
+++ b/include/linux/percpu_ida.h
@@ -4,6 +4,7 @@
 #include <linux/types.h>
 #include <linux/bitops.h>
 #include <linux/init.h>
+#include <linux/sched.h>
 #include <linux/spinlock_types.h>
 #include <linux/wait.h>
 #include <linux/cpumask.h>
@@ -61,7 +62,7 @@ struct percpu_ida {
 /* Max size of percpu freelist, */
 #define IDA_DEFAULT_PCPU_SIZE	((IDA_DEFAULT_PCPU_BATCH_MOVE * 3) / 2)
 
-int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp);
+int percpu_ida_alloc(struct percpu_ida *pool, int state);
 void percpu_ida_free(struct percpu_ida *pool, unsigned tag);
 
 void percpu_ida_destroy(struct percpu_ida *pool);
diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c
index 9d054bf..4579749 100644
--- a/lib/percpu_ida.c
+++ b/lib/percpu_ida.c
@@ -132,22 +132,22 @@ static inline unsigned alloc_local_tag(struct percpu_ida_cpu *tags)
 /**
  * percpu_ida_alloc - allocate a tag
  * @pool: pool to allocate from
- * @gfp: gfp flags
+ * @state: task state for prepare_to_wait
  *
  * Returns a tag - an integer in the range [0..nr_tags) (passed to
  * tag_pool_init()), or otherwise -ENOSPC on allocation failure.
  *
 * Safe to be called from interrupt context (assuming it isn't passed
- * __GFP_WAIT, of course).
+ * TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, of course).
 *
 * @gfp indicates whether or not to wait until a free id is available (it's not
 * used for internal memory allocations); thus if passed __GFP_WAIT we may sleep
 * however long it takes until another thread frees an id (same semantics as a
 * mempool).
 *
- * Will not fail if passed __GFP_WAIT.
+ * Will not fail if passed TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE.
 */
-int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp)
+int percpu_ida_alloc(struct percpu_ida *pool, int state)
 {
 	DEFINE_WAIT(wait);
 	struct percpu_ida_cpu *tags;
@@ -174,7 +174,7 @@ int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp)
 		 *
 		 * global lock held and irqs disabled, don't need percpu lock
 		 */
-		prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
+		prepare_to_wait(&pool->wait, &wait, state);
 
 		if (!tags->nr_free)
 			alloc_global_tags(pool, tags);
@@ -191,9 +191,14 @@ int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp)
 		spin_unlock(&pool->lock);
 		local_irq_restore(flags);
 
-		if (tag >= 0 || !(gfp & __GFP_WAIT))
+		if (tag >= 0 || state == TASK_RUNNING)
 			break;
 
+		if (signal_pending_state(state, current)) {
+			tag = -ERESTARTSYS;
+			break;
+		}
+
 		schedule();
 
 		local_irq_save(flags);
-- 
1.7.10.4
--
To unsubscribe from this list: send the line "unsubscribe target-devel" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at http://vger.kernel.org/majordomo-info.html
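For context, a minimal caller-side sketch of the reworked interface. This is
not part of the patch: my_get_tag and the can_sleep argument are illustrative
names, and the caller is assumed to own a struct percpu_ida already set up
with percpu_ida_init().

#include <linux/percpu_ida.h>
#include <linux/sched.h>

/* Illustrative helper, not from the patch: grab a tag from @pool. */
static int my_get_tag(struct percpu_ida *pool, bool can_sleep)
{
	int tag;

	if (can_sleep) {
		/*
		 * May block until another context frees a tag; a pending
		 * signal aborts the wait and we get -ERESTARTSYS back.
		 */
		tag = percpu_ida_alloc(pool, TASK_INTERRUPTIBLE);
	} else {
		/* Never sleeps; returns a negative value if no tag is free. */
		tag = percpu_ida_alloc(pool, TASK_RUNNING);
	}

	return tag;	/* >= 0 on success, negative errno otherwise */
}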