Introduce an unlocked version of iio_map_array_unregister(). This
function can be used to unwind in case of an error while the
iio_map_list_lock mutex is held.

Signed-off-by: Lino Sanfilippo <LinoSanfilippo@xxxxxx>
Reviewed-by: Andy Shevchenko <andy.shevchenko@xxxxxxxxx>
---
 drivers/iio/inkern.c | 27 ++++++++++++++++++---------
 1 file changed, 18 insertions(+), 9 deletions(-)

diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index ede99e0..39c1d63 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -24,6 +24,21 @@ struct iio_map_internal {
 static LIST_HEAD(iio_map_list);
 static DEFINE_MUTEX(iio_map_list_lock);
 
+static int iio_map_array_unregister_locked(struct iio_dev *indio_dev)
+{
+	int ret = -ENODEV;
+	struct iio_map_internal *mapi, *next;
+
+	list_for_each_entry_safe(mapi, next, &iio_map_list, l) {
+		if (indio_dev == mapi->indio_dev) {
+			list_del(&mapi->l);
+			kfree(mapi);
+			ret = 0;
+		}
+	}
+	return ret;
+}
+
 int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps)
 {
 	int i = 0, ret = 0;
@@ -57,18 +72,12 @@ EXPORT_SYMBOL_GPL(iio_map_array_register);
  */
 int iio_map_array_unregister(struct iio_dev *indio_dev)
 {
-	int ret = -ENODEV;
-	struct iio_map_internal *mapi, *next;
+	int ret;
 
 	mutex_lock(&iio_map_list_lock);
-	list_for_each_entry_safe(mapi, next, &iio_map_list, l) {
-		if (indio_dev == mapi->indio_dev) {
-			list_del(&mapi->l);
-			kfree(mapi);
-			ret = 0;
-		}
-	}
+	ret = iio_map_array_unregister_locked(indio_dev);
 	mutex_unlock(&iio_map_list_lock);
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(iio_map_array_unregister);
-- 
2.7.4
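
For reference, here is a minimal sketch (not part of the patch) of how a caller
could use the new helper to unwind while iio_map_list_lock is still held. The
body of iio_map_array_register() shown below is only an approximation of the
upstream function, and the error-path call to iio_map_array_unregister_locked()
is the assumed usage, not something this patch adds:

/*
 * Sketch only: an approximation of iio_map_array_register() showing how the
 * new helper could unwind a partial registration under iio_map_list_lock.
 * The error-path call to iio_map_array_unregister_locked() is an assumption
 * about future use, not part of this patch.
 */
int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps)
{
	int i = 0, ret = 0;
	struct iio_map_internal *mapi;

	if (!maps)
		return 0;

	mutex_lock(&iio_map_list_lock);
	while (maps[i].consumer_dev_name) {
		mapi = kzalloc(sizeof(*mapi), GFP_KERNEL);
		if (!mapi) {
			ret = -ENOMEM;
			goto error_ret;
		}
		mapi->map = &maps[i];
		mapi->indio_dev = indio_dev;
		list_add_tail(&mapi->l, &iio_map_list);
		i++;
	}
error_ret:
	if (ret)
		/* drop any maps added so far; the lock is already held */
		iio_map_array_unregister_locked(indio_dev);
	mutex_unlock(&iio_map_list_lock);

	return ret;
}

Calling the _locked variant here avoids dropping and re-taking the mutex, and
avoids a self-deadlock that would occur if the error path called
iio_map_array_unregister(), which acquires iio_map_list_lock itself.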