> -----Original Message-----
> From: Stephen Hemminger [mailto:stephen@xxxxxxxxxxxxxxxxxx]
> Sent: Monday, January 23, 2017 5:40 PM
> To: KY Srinivasan <kys@xxxxxxxxxxxxx>; Haiyang Zhang <haiyangz@xxxxxxxxxxxxx>
> Cc: devel@xxxxxxxxxxxxxxxxxxxxxx; Stephen Hemminger <sthemmin@xxxxxxxxxxxxx>
> Subject: [PATCH 07/14] vmbus: remove conditional locking of vmbus_write
>
> All current usage of vmbus write uses the acquire_lock flag, therefore
> having it be optional is unnecessary. This also fixes a sparse warning
> since sparse doesn't like when a function has conditional locking.

To avoid a cross-tree dependency, I got this functionality into Greg's
tree first; I plan to submit netvsc patches that use it. As you know, we
already hold a higher-level lock in the protocol stack when we send on a
sub-channel, so skipping the ring lock there avoids an unnecessary
spin-lock round trip in the data path.
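For illustration, the planned netvsc usage would have looked roughly like
this (a hypothetical sketch with made-up names, not actual netvsc code):

	#include <linux/hyperv.h>

	/*
	 * Hypothetical sketch only. Every send on this sub-channel already
	 * runs under a higher-level per-TX-queue lock in the network stack,
	 * so the channel can opt out of the per-write ring spin lock once,
	 * at setup time, via the knob this patch removes.
	 */
	static void example_netvsc_subchannel_setup(struct vmbus_channel *subchan)
	{
		/* Only safe because an outer lock serializes all writers. */
		set_channel_lock_state(subchan, false);
	}

With set_channel_lock_state() removed, every send path takes the ring spin
lock again, which is the round trip I refer to above.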
Regards,

K. Y

>
> Signed-off-by: Stephen Hemminger <sthemmin@xxxxxxxxxxxxx>
> ---
>  drivers/hv/channel.c      | 13 ++++---------
>  drivers/hv/channel_mgmt.c |  1 -
>  drivers/hv/hyperv_vmbus.h |  3 +--
>  drivers/hv/ring_buffer.c  | 11 ++++-------
>  include/linux/hyperv.h    | 15 ---------------
>  5 files changed, 9 insertions(+), 34 deletions(-)
>
> diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
> index fd73ce1340be..61e2cc1b653f 100644
> --- a/drivers/hv/channel.c
> +++ b/drivers/hv/channel.c
> @@ -651,7 +651,6 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
>  	u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64));
>  	struct kvec bufferlist[3];
>  	u64 aligned_data = 0;
> -	bool lock = channel->acquire_ring_lock;
>  	int num_vecs = ((bufferlen != 0) ? 3 : 1);
>
>
> @@ -670,7 +669,7 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
>  	bufferlist[2].iov_base = &aligned_data;
>  	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
>
> -	return hv_ringbuffer_write(channel, bufferlist, num_vecs, lock);
> +	return hv_ringbuffer_write(channel, bufferlist, num_vecs);
>  }
>  EXPORT_SYMBOL(vmbus_sendpacket_ctl);
>
> @@ -718,12 +717,10 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
>  	u32 packetlen_aligned;
>  	struct kvec bufferlist[3];
>  	u64 aligned_data = 0;
> -	bool lock = channel->acquire_ring_lock;
>
>  	if (pagecount > MAX_PAGE_BUFFER_COUNT)
>  		return -EINVAL;
>
> -
>  	/*
>  	 * Adjust the size down since vmbus_channel_packet_page_buffer is the
>  	 * largest size we support
> @@ -755,7 +752,7 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
>  	bufferlist[2].iov_base = &aligned_data;
>  	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
>
> -	return hv_ringbuffer_write(channel, bufferlist, 3, lock);
> +	return hv_ringbuffer_write(channel, bufferlist, 3);
>  }
>  EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer_ctl);
>
> @@ -790,7 +787,6 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
>  	u32 packetlen_aligned;
>  	struct kvec bufferlist[3];
>  	u64 aligned_data = 0;
> -	bool lock = channel->acquire_ring_lock;
>
>  	packetlen = desc_size + bufferlen;
>  	packetlen_aligned = ALIGN(packetlen, sizeof(u64));
> @@ -810,7 +806,7 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
>  	bufferlist[2].iov_base = &aligned_data;
>  	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
>
> -	return hv_ringbuffer_write(channel, bufferlist, 3, lock);
> +	return hv_ringbuffer_write(channel, bufferlist, 3);
>  }
>  EXPORT_SYMBOL_GPL(vmbus_sendpacket_mpb_desc);
>
> @@ -828,7 +824,6 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
>  	u32 packetlen_aligned;
>  	struct kvec bufferlist[3];
>  	u64 aligned_data = 0;
> -	bool lock = channel->acquire_ring_lock;
>  	u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
>  					 multi_pagebuffer->len);
>
> @@ -867,7 +862,7 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
>  	bufferlist[2].iov_base = &aligned_data;
>  	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
>
> -	return hv_ringbuffer_write(channel, bufferlist, 3, lock);
> +	return hv_ringbuffer_write(channel, bufferlist, 3);
>  }
>  EXPORT_SYMBOL_GPL(vmbus_sendpacket_multipagebuffer);
>
> diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
> index 49d77be90ca4..b196bdfea8c8 100644
> --- a/drivers/hv/channel_mgmt.c
> +++ b/drivers/hv/channel_mgmt.c
> @@ -304,7 +304,6 @@ static struct vmbus_channel *alloc_channel(void)
>  	if (!channel)
>  		return NULL;
>
> -	channel->acquire_ring_lock = true;
>  	spin_lock_init(&channel->inbound_lock);
>  	spin_lock_init(&channel->lock);
>
> diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
> index 0a884cef4193..0c9516d0bf36 100644
> --- a/drivers/hv/hyperv_vmbus.h
> +++ b/drivers/hv/hyperv_vmbus.h
> @@ -277,8 +277,7 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
>  void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);
>
>  int hv_ringbuffer_write(struct vmbus_channel *channel,
> -			struct kvec *kv_list,
> -			u32 kv_count, bool lock);
> +			struct kvec *kv_list, u32 kv_count);
>
>  int hv_ringbuffer_read(struct vmbus_channel *channel,
>  		       void *buffer, u32 buflen, u32 *buffer_actual_len,
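For reference, the sparse warning mentioned in the commit message comes
from conditional lock/unlock pairs, which defeat sparse's lock-context
tracking. A minimal standalone sketch of the offending shape (illustrative
only, not the actual vmbus code):

	#include <linux/spinlock.h>

	/*
	 * Sparse reports something like:
	 *   context imbalance in 'example_write' - different lock
	 *   contexts for basic block
	 * because it cannot prove whether the lock is held on every path
	 * through the function.
	 */
	static int example_write(spinlock_t *lock, bool take_lock)
	{
		unsigned long flags = 0;

		if (take_lock)
			spin_lock_irqsave(lock, flags);

		/* ... write to the shared ring ... */

		if (take_lock)
			spin_unlock_irqrestore(lock, flags);

		return 0;
	}

Making the lock acquisition unconditional, as the ring_buffer.c hunks
below do, gives sparse a single, provable lock context.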
> diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
> index 2325242029c5..04b5c5fce7ae 100644
> --- a/drivers/hv/ring_buffer.c
> +++ b/drivers/hv/ring_buffer.c
> @@ -256,7 +256,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
>
>  /* Write to the ring buffer. */
>  int hv_ringbuffer_write(struct vmbus_channel *channel,
> -			struct kvec *kv_list, u32 kv_count, bool lock)
> +			struct kvec *kv_list, u32 kv_count)
>  {
>  	int i = 0;
>  	u32 bytes_avail_towrite;
> @@ -276,8 +276,7 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
>
>  	totalbytes_towrite += sizeof(u64);
>
> -	if (lock)
> -		spin_lock_irqsave(&outring_info->ring_lock, flags);
> +	spin_lock_irqsave(&outring_info->ring_lock, flags);
>
>  	bytes_avail_towrite = hv_get_bytes_to_write(outring_info);
>
> @@ -287,8 +286,7 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
>  	 * is empty since the read index == write index.
>  	 */
>  	if (bytes_avail_towrite <= totalbytes_towrite) {
> -		if (lock)
> -			spin_unlock_irqrestore(&outring_info->ring_lock, flags);
> +		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
>  		return -EAGAIN;
>  	}
>
> @@ -317,8 +315,7 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
>  	/* Now, update the write location */
>  	outring_info->ring_buffer->write_index = next_write_location;
>
> -	if (lock)
> -		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
> +	spin_unlock_irqrestore(&outring_info->ring_lock, flags);
>
>  	hv_signal_on_write(old_write, channel);
>
> diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
> index b691a6530e85..6a748f7406c6 100644
> --- a/include/linux/hyperv.h
> +++ b/include/linux/hyperv.h
> @@ -832,16 +832,6 @@ struct vmbus_channel {
>  	 */
>  	struct list_head percpu_list;
>  	/*
> -	 * On the channel send side, many of the VMBUS
> -	 * device drivers explicity serialize access to the
> -	 * outgoing ring buffer. Give more control to the
> -	 * VMBUS device drivers in terms how to serialize
> -	 * accesss to the outgoing ring buffer.
> -	 * The default behavior will be to aquire the
> -	 * ring lock to preserve the current behavior.
> -	 */
> -	bool acquire_ring_lock;
> -	/*
>  	 * For performance critical channels (storage, networking
>  	 * etc,), Hyper-V has a mechanism to enhance the throughput
>  	 * at the expense of latency:
> @@ -881,11 +871,6 @@ struct vmbus_channel {
>
>  };
>
> -static inline void set_channel_lock_state(struct vmbus_channel *c, bool state)
> -{
> -	c->acquire_ring_lock = state;
> -}
> -
>  static inline bool is_hvsock_channel(const struct vmbus_channel *c)
>  {
>  	return !!(c->offermsg.offer.chn_flags &
> --
> 2.11.0