channel_vp_mapping_show() currently uses channel->lock to protect the
loop over sc_list against concurrent list additions/deletions, but it
does not protect the loads of target_cpu against a concurrent
target_cpu_store().  While the data races on target_cpu are hardly of
any concern here, replace the channel->lock critical section with a
channel_mutex critical section and extend the latter to include the
loads of target_cpu; the same pattern is used in hv_synic_cleanup().

Signed-off-by: Andrea Parri (Microsoft) <parri.andrea@xxxxxxxxx>
---
 drivers/hv/vmbus_drv.c | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index b5ae45eb8aef7..9e39692dc13ee 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -507,18 +507,17 @@ static ssize_t channel_vp_mapping_show(struct device *dev,
 {
 	struct hv_device *hv_dev = device_to_hv_device(dev);
 	struct vmbus_channel *channel = hv_dev->channel, *cur_sc;
-	unsigned long flags;
 	int buf_size = PAGE_SIZE, n_written, tot_written;
 	struct list_head *cur;
 
 	if (!channel)
 		return -ENODEV;
 
+	mutex_lock(&vmbus_connection.channel_mutex);
+
 	tot_written = snprintf(buf, buf_size, "%u:%u\n",
 		channel->offermsg.child_relid, channel->target_cpu);
 
-	spin_lock_irqsave(&channel->lock, flags);
-
 	list_for_each(cur, &channel->sc_list) {
 		if (tot_written >= buf_size - 1)
 			break;
@@ -532,7 +531,7 @@ static ssize_t channel_vp_mapping_show(struct device *dev,
 		tot_written += n_written;
 	}
 
-	spin_unlock_irqrestore(&channel->lock, flags);
+	mutex_unlock(&vmbus_connection.channel_mutex);
 
 	return tot_written;
 }
-- 
2.25.1
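
P.S. For readers unfamiliar with the pattern mentioned above: here is a
condensed, paraphrased sketch of how hv_synic_cleanup() in
drivers/hv/hv.c relies on the same channel_mutex critical section to
cover both the channel-list walk and the target_cpu loads.  This is not
the verbatim upstream code, and the helper name
hv_cpu_has_bound_channel() is made up for illustration:

	/*
	 * Sketch (not verbatim): holding channel_mutex serializes both
	 * the walk over the channel lists and the loads of target_cpu,
	 * so a concurrent target_cpu_store() cannot run inside this
	 * critical section.
	 */
	static bool hv_cpu_has_bound_channel(unsigned int cpu)
	{
		struct vmbus_channel *channel, *sc;
		bool found = false;

		mutex_lock(&vmbus_connection.channel_mutex);
		list_for_each_entry(channel, &vmbus_connection.chn_list,
				    listentry) {
			if (channel->target_cpu == cpu) {
				found = true;
				break;
			}
			/* Sub-channels hang off the primary's sc_list. */
			list_for_each_entry(sc, &channel->sc_list, sc_list) {
				if (sc->target_cpu == cpu) {
					found = true;
					break;
				}
			}
			if (found)
				break;
		}
		mutex_unlock(&vmbus_connection.channel_mutex);

		return found;
	}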