3.16-stable review patch.  If anyone has any objections, please let me know.

------------------

From: Srinivas Pandruvada <srinivas.pandruvada@xxxxxxxxxxxxxxx>

commit 0495081179212b758775df752e657ea71dcae020 upstream.

This can result in wrong reference count for trigger device, call
iio_trigger_get to increment reference.

Refer to http://www.spinics.net/lists/linux-iio/msg13669.html for
discussion with Jonathan.

Signed-off-by: Srinivas Pandruvada <srinivas.pandruvada@xxxxxxxxxxxxxxx>
Acked-by: Lars-Peter Clausen <lars@xxxxxxxxxx>
Signed-off-by: Jonathan Cameron <jic23@xxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
 drivers/staging/iio/meter/ade7758_trigger.c |    2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

--- a/drivers/staging/iio/meter/ade7758_trigger.c
+++ b/drivers/staging/iio/meter/ade7758_trigger.c
@@ -85,7 +85,7 @@ int ade7758_probe_trigger(struct iio_dev
 	ret = iio_trigger_register(st->trig);
 
 	/* select default trigger */
-	indio_dev->trig = st->trig;
+	indio_dev->trig = iio_trigger_get(st->trig);
 	if (ret)
 		goto error_free_irq;
 
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -391,14 +391,17 @@ out:
 static unsigned long
 ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
-	static atomic_t start_pool = ATOMIC_INIT(0);
+	static DEFINE_MUTEX(lock);
+	static unsigned start_pool;
 	unsigned i;
-	unsigned pool_offset = atomic_add_return(1, &start_pool);
+	unsigned pool_offset;
 	struct ttm_page_pool *pool;
 	int shrink_pages = sc->nr_to_scan;
 	unsigned long freed = 0;
 
-	pool_offset = pool_offset % NUM_POOLS;
+	if (!mutex_trylock(&lock))
+		return SHRINK_STOP;
+	pool_offset = ++start_pool % NUM_POOLS;
 	/* select start pool in round robin fashion */
 	for (i = 0; i < NUM_POOLS; ++i) {
 		unsigned nr_free = shrink_pages;
@@ -408,6 +411,7 @@ ttm_pool_shrink_scan(struct shrinker *sh
 		shrink_pages = ttm_page_pool_free(pool, nr_free);
 		freed += nr_free - shrink_pages;
 	}
+	mutex_unlock(&lock);
 	return freed;
 }
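For context, the ade7758 hunk follows the usual IIO pattern of taking a
separate reference for the indio_dev->trig pointer.  Below is a minimal
sketch of that pattern, not the actual driver code: iio_trigger_alloc(),
iio_trigger_register(), iio_trigger_get(), iio_trigger_unregister() and
iio_trigger_free() are the real IIO helpers (3.16-era signatures), while
struct my_state and the my_* functions are hypothetical.

/*
 * Sketch only: illustrates why indio_dev->trig is assigned through
 * iio_trigger_get() rather than aliasing the driver's own pointer.
 */
#include <linux/iio/iio.h>
#include <linux/iio/trigger.h>

struct my_state {
	struct iio_trigger *trig;
};

static int my_probe_trigger(struct iio_dev *indio_dev)
{
	struct my_state *st = iio_priv(indio_dev);
	int ret;

	st->trig = iio_trigger_alloc("%s-dev%d", indio_dev->name,
				     indio_dev->id);
	if (!st->trig)
		return -ENOMEM;

	ret = iio_trigger_register(st->trig);
	if (ret) {
		iio_trigger_free(st->trig);
		return ret;
	}

	/*
	 * The indio_dev->trig assignment needs its own reference;
	 * without iio_trigger_get() the trigger's refcount ends up
	 * wrong (see the commit message above).
	 */
	indio_dev->trig = iio_trigger_get(st->trig);
	return 0;
}

static void my_remove_trigger(struct iio_dev *indio_dev)
{
	struct my_state *st = iio_priv(indio_dev);

	iio_trigger_unregister(st->trig);
	iio_trigger_free(st->trig);
}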
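The ttm_page_alloc.c hunk replaces the atomic round-robin counter with a
static mutex taken via mutex_trylock(), so only one scan runs at a time
and contended callers return SHRINK_STOP instead of piling up.  Here is a
minimal, self-contained sketch of that shrinker pattern; DEFINE_MUTEX(),
mutex_trylock(), SHRINK_STOP and the shrinker callbacks are the real
(3.16-era) kernel interfaces, while MY_NR_POOLS and my_pool_free() are
hypothetical stand-ins for the TTM pool internals.

#include <linux/mutex.h>
#include <linux/shrinker.h>

#define MY_NR_POOLS 8

/* Hypothetical reclaim helper: returns how many of @nr pages it could NOT free. */
static unsigned long my_pool_free(unsigned int pool, unsigned long nr)
{
	return 0;
}

static unsigned long my_shrink_scan(struct shrinker *shrink,
				    struct shrink_control *sc)
{
	static DEFINE_MUTEX(lock);
	static unsigned int start_pool;
	unsigned long shrink_pages = sc->nr_to_scan;
	unsigned long freed = 0;
	unsigned int pool_offset;
	unsigned int i;

	/* Only one scan at a time; tell reclaim to back off if contended. */
	if (!mutex_trylock(&lock))
		return SHRINK_STOP;

	/* Rotate the starting pool so reclaim pressure is spread around. */
	pool_offset = ++start_pool % MY_NR_POOLS;

	for (i = 0; i < MY_NR_POOLS; ++i) {
		unsigned long nr_free = shrink_pages;

		shrink_pages = my_pool_free((i + pool_offset) % MY_NR_POOLS,
					    nr_free);
		freed += nr_free - shrink_pages;
	}

	mutex_unlock(&lock);
	return freed;
}

static unsigned long my_shrink_count(struct shrinker *shrink,
				     struct shrink_control *sc)
{
	/* A rough estimate of reclaimable objects is good enough here. */
	return MY_NR_POOLS;
}

static struct shrinker my_shrinker = {
	.count_objects = my_shrink_count,
	.scan_objects  = my_shrink_scan,
	.seeks         = DEFAULT_SEEKS,
};
/* register_shrinker(&my_shrinker) hooks this into memory reclaim. */

mutex_trylock() rather than mutex_lock() is used here because the scan
callback runs from reclaim context, where blocking on another scan would
only add pressure; SHRINK_STOP tells the VM not to keep calling back.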