4.14-stable review patch.  If anyone has any objections, please let me know.

------------------

From: Michael Kelley <mhkelley@xxxxxxxxxxx>

commit 655296c8bbeffcf020558c4455305d597a73bde1 upstream.

Fix bugs in signaling the Hyper-V host when freeing space in the
host->guest ring buffer:

1. The interrupt_mask must not be used to determine whether to signal
   on the host->guest ring buffer
2. The ring buffer write_index must be read (via hv_get_bytes_to_write)
   *after* pending_send_sz is read in order to avoid a race condition
3. Comparisons with pending_send_sz must treat the "equals" case as
   not-enough-space
4. Don't signal if the pending_send_sz feature is not present. Older
   versions of Hyper-V that don't implement this feature will poll.

Fixes: 03bad714a161 ("vmbus: more host signalling avoidance")
Cc: Stable <stable@xxxxxxxxxxxxxxx> # 4.14 and above
Signed-off-by: Michael Kelley <mhkelley@xxxxxxxxxxx>
Signed-off-by: K. Y. Srinivasan <kys@xxxxxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
 drivers/hv/ring_buffer.c |   54 +++++++++++++++++++++++++++++++----------------
 1 file changed, 36 insertions(+), 18 deletions(-)

--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -394,13 +394,24 @@ __hv_pkt_iter_next(struct vmbus_channel
 }
 EXPORT_SYMBOL_GPL(__hv_pkt_iter_next);
 
+/* How many bytes were read in this iterator cycle */
+static u32 hv_pkt_iter_bytes_read(const struct hv_ring_buffer_info *rbi,
+				  u32 start_read_index)
+{
+	if (rbi->priv_read_index >= start_read_index)
+		return rbi->priv_read_index - start_read_index;
+	else
+		return rbi->ring_datasize - start_read_index +
+			rbi->priv_read_index;
+}
+
 /*
  * Update host ring buffer after iterating over packets.
  */
 void hv_pkt_iter_close(struct vmbus_channel *channel)
 {
 	struct hv_ring_buffer_info *rbi = &channel->inbound;
-	u32 orig_write_sz = hv_get_bytes_to_write(rbi);
+	u32 curr_write_sz, pending_sz, bytes_read, start_read_index;
 
 	/*
 	 * Make sure all reads are done before we update the read index since
@@ -408,8 +419,12 @@ void hv_pkt_iter_close(struct vmbus_chan
 	 * is updated.
 	 */
 	virt_rmb();
+	start_read_index = rbi->ring_buffer->read_index;
 	rbi->ring_buffer->read_index = rbi->priv_read_index;
 
+	if (!rbi->ring_buffer->feature_bits.feat_pending_send_sz)
+		return;
+
 	/*
 	 * Issue a full memory barrier before making the signaling decision.
 	 * Here is the reason for having this barrier:
@@ -423,26 +438,29 @@ void hv_pkt_iter_close(struct vmbus_chan
 	 */
 	virt_mb();
 
-	/* If host has disabled notifications then skip */
-	if (rbi->ring_buffer->interrupt_mask)
+	pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
+	if (!pending_sz)
 		return;
 
-	if (rbi->ring_buffer->feature_bits.feat_pending_send_sz) {
-		u32 pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
+	/*
+	 * Ensure the read of write_index in hv_get_bytes_to_write()
+	 * happens after the read of pending_send_sz.
+	 */
+	virt_rmb();
+	curr_write_sz = hv_get_bytes_to_write(rbi);
+	bytes_read = hv_pkt_iter_bytes_read(rbi, start_read_index);
+
+	/*
+	 * If there was space before we began iteration,
+	 * then host was not blocked.
+	 */
 
-		/*
-		 * If there was space before we began iteration,
-		 * then host was not blocked.  Also handles case where
-		 * pending_sz is zero then host has nothing pending
-		 * and does not need to be signaled.
-		 */
-		if (orig_write_sz > pending_sz)
-			return;
-
-		/* If pending write will not fit, don't give false hope. */
-		if (hv_get_bytes_to_write(rbi) < pending_sz)
-			return;
-	}
+	if (curr_write_sz - bytes_read > pending_sz)
+		return;
+
+	/* If pending write will not fit, don't give false hope. */
+	if (curr_write_sz <= pending_sz)
+		return;
 
 	vmbus_setevent(channel);
 }
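
For anyone following the review, below is a small standalone sketch of the
signaling decision the patched hv_pkt_iter_close() makes. It is illustration
only and not part of the patch: should_signal_host() is a made-up helper that
uses plain unsigned ints in place of the kernel's ring-buffer structures and
barriers, but it shows why the bytes read this pass are subtracted when
checking whether the host was already unblocked, and why "exactly
pending_send_sz bytes free" must not signal.

#include <stdbool.h>
#include <stdio.h>

/*
 * Illustration only (not kernel code): mirrors the decision logic in the
 * patched hv_pkt_iter_close().  In the kernel, pending_send_sz is read
 * before write_index (enforced by virt_rmb()), and free space exactly
 * equal to pending_send_sz counts as not enough.
 */
static bool should_signal_host(unsigned int pending_sz,
			       unsigned int curr_write_sz,
			       unsigned int bytes_read)
{
	/* Host has nothing pending and does not need to be signaled. */
	if (pending_sz == 0)
		return false;

	/* If there was space before this read pass, host was not blocked. */
	if (curr_write_sz - bytes_read > pending_sz)
		return false;

	/* If the pending write still will not fit, don't give false hope. */
	if (curr_write_sz <= pending_sz)
		return false;

	return true;
}

int main(void)
{
	/* Host wants 512 bytes; 256 were free before this pass, 768 now. */
	printf("%d\n", should_signal_host(512, 768, 512));	/* 1: signal */

	/* Exactly pending_send_sz bytes free: treated as not enough space. */
	printf("%d\n", should_signal_host(512, 512, 256));	/* 0: don't */

	return 0;
}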