From: Abel Vesa <abel.vesa@xxxxxxxxxx>

commit 9446fa1683a7e3937d9970248ced427c1983a1c5 upstream.

Currently, there is a race window between the point when the mutex is
unlocked in fastrpc_map_lookup and the reference count being taken
(fastrpc_map_get) in fastrpc_map_find, which can lead to a
use-after-free.

So let's merge fastrpc_map_find into fastrpc_map_lookup, which allows
us to protect both the maps list and the reference count by taking the
&fl->lock spinlock, since the spinlock is now released only after the
reference has been taken. Add a take_ref argument to make this
suitable for all callers.

Fixes: 8f6c1d8c4f0c ("misc: fastrpc: Add fdlist implementation")
Cc: stable <stable@xxxxxxxxxx>
Co-developed-by: Ola Jeppsson <ola@xxxxxxxx>
Signed-off-by: Ola Jeppsson <ola@xxxxxxxx>
Signed-off-by: Abel Vesa <abel.vesa@xxxxxxxxxx>
Signed-off-by: Srinivas Kandagatla <srinivas.kandagatla@xxxxxxxxxx>
Link: https://lore.kernel.org/r/20221124174941.418450-2-srinivas.kandagatla@xxxxxxxxxx
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
 drivers/misc/fastrpc.c | 41 +++++++++++++++++++++--------------------
 1 file changed, 21 insertions(+), 20 deletions(-)

--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -333,30 +333,31 @@ static void fastrpc_map_get(struct fastr
 }
 
 static int fastrpc_map_lookup(struct fastrpc_user *fl, int fd,
-			    struct fastrpc_map **ppmap)
+			    struct fastrpc_map **ppmap, bool take_ref)
 {
+	struct fastrpc_session_ctx *sess = fl->sctx;
 	struct fastrpc_map *map = NULL;
+	int ret = -ENOENT;
 
-	mutex_lock(&fl->mutex);
+	spin_lock(&fl->lock);
 	list_for_each_entry(map, &fl->maps, node) {
-		if (map->fd == fd) {
-			*ppmap = map;
-			mutex_unlock(&fl->mutex);
-			return 0;
-		}
-	}
-	mutex_unlock(&fl->mutex);
-
-	return -ENOENT;
-}
+		if (map->fd != fd)
+			continue;
 
-static int fastrpc_map_find(struct fastrpc_user *fl, int fd,
-			    struct fastrpc_map **ppmap)
-{
-	int ret = fastrpc_map_lookup(fl, fd, ppmap);
+		if (take_ref) {
+			ret = fastrpc_map_get(map);
+			if (ret) {
+				dev_dbg(sess->dev, "%s: Failed to get map fd=%d ret=%d\n",
+					__func__, fd, ret);
+				break;
+			}
+		}
 
-	if (!ret)
-		fastrpc_map_get(*ppmap);
+		*ppmap = map;
+		ret = 0;
+		break;
+	}
+	spin_unlock(&fl->lock);
 
 	return ret;
 }
@@ -703,7 +704,7 @@ static int fastrpc_map_create(struct fas
 	struct fastrpc_map *map = NULL;
 	int err = 0;
 
-	if (!fastrpc_map_find(fl, fd, ppmap))
+	if (!fastrpc_map_lookup(fl, fd, ppmap, true))
 		return 0;
 
 	map = kzalloc(sizeof(*map), GFP_KERNEL);
@@ -1026,7 +1027,7 @@ static int fastrpc_put_args(struct fastr
 	for (i = 0; i < FASTRPC_MAX_FDLIST; i++) {
 		if (!fdlist[i])
 			break;
-		if (!fastrpc_map_lookup(fl, (int)fdlist[i], &mmap))
+		if (!fastrpc_map_lookup(fl, (int)fdlist[i], &mmap, false))
 			fastrpc_map_put(mmap);
 	}
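
To see why taking the reference while still holding the lock closes the
window, here is a minimal user-space sketch of the same pattern. It is
illustrative only: the names (struct map, map_get, map_lookup, list_lock)
and the pthread mutex are stand-ins, not the driver's own code, and the
atomic counter stands in for struct kref / kref_get_unless_zero().

/* Build with: cc -c sketch.c -pthread */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct map {
	int fd;
	atomic_int refcount;	/* stand-in for struct kref  */
	struct map *next;	/* stand-in for list_head    */
};

static struct map *map_list;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Take a reference only if the count has not already hit zero,
 * mirroring kref_get_unless_zero(): returns false if the object
 * is already on its way to being freed.
 */
static bool map_get(struct map *m)
{
	int old = atomic_load(&m->refcount);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&m->refcount, &old, old + 1))
			return true;
		/* CAS failure reloaded 'old'; retry unless it reached zero */
	}
	return false;
}

/*
 * Lookup and (optional) reference acquisition happen under the same
 * lock, so no other thread can drop the last reference in between.
 * This is the shape of the merged fastrpc_map_lookup() above.
 */
static struct map *map_lookup(int fd, bool take_ref)
{
	struct map *m, *found = NULL;

	pthread_mutex_lock(&list_lock);
	for (m = map_list; m; m = m->next) {
		if (m->fd != fd)
			continue;
		if (take_ref && !map_get(m))
			break;	/* lost the race against the final put */
		found = m;
		break;
	}
	pthread_mutex_unlock(&list_lock);
	return found;
}

A caller that only needs to know whether an fd is currently mapped (like
the fdlist walk in fastrpc_put_args) passes take_ref = false, while a
caller that will keep using the map afterwards (like fastrpc_map_create)
passes true and can no longer observe a map whose last reference is being
dropped concurrently.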