Re: [RFC-V3 4/7] [net/9p] Add gup/zero_copy support to VirtIO transport layer.

On 2/11/2011 9:58 AM, Aneesh Kumar K. V wrote:
> On Thu, 10 Feb 2011 17:25:08 -0800, "Venkateswararao Jujjuri (JV)" <jvrao@xxxxxxxxxxxxxxxxxx> wrote:
>> Signed-off-by: Venkateswararao Jujjuri <jvrao@xxxxxxxxxxxxxxxxxx>
>> ---
>>  net/9p/trans_virtio.c |  107 ++++++++++++++++++++++++++++++++++++++++++++++---
>>  1 files changed, 101 insertions(+), 6 deletions(-)
>>
>> diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
>> index c8f3f72..e0c301f 100644
>> --- a/net/9p/trans_virtio.c
>> +++ b/net/9p/trans_virtio.c
>> @@ -45,6 +45,7 @@
>>  #include <linux/scatterlist.h>
>>  #include <linux/virtio.h>
>>  #include <linux/virtio_9p.h>
>> +#include "trans_common.h"
>>
>>  #define VIRTQUEUE_NUM	128
>>
>> @@ -155,6 +156,14 @@ static void req_done(struct virtqueue *vq)
>>  					rc->tag);
>>  			req = p9_tag_lookup(chan->client, rc->tag);
>>  			req->status = REQ_STATUS_RCVD;
>> +			if (req->tc->private) {
>> +				struct trans_rpage_info *rp = req->tc->private;
>> +				/*Release pages */
>> +				p9_release_req_pages(rp);
>> +				if (rp->rp_alloc)
>> +					kfree(rp);
>> +				req->tc->private = NULL;
>> +			}
>>  			p9_client_cb(chan->client, req);
>>  		} else {
>>  			spin_unlock_irqrestore(&chan->lock, flags);
>> @@ -202,6 +211,30 @@ static int p9_virtio_cancel(struct p9_client *client, struct p9_req_t *req)
>>  	return 1;
>>  }
>>
>> +static int
>> +pack_sg_list_p(struct scatterlist *sg, int start, int limit, size_t pdata_off,
>> +		struct page **pdata, int count)
>> +{
>> +	int s;
>> +	int i = 0;
>> +	int index = start;
>> +
>> +	if (pdata_off) {
>> +		s = min((int)(PAGE_SIZE - pdata_off), count);
>> +		sg_set_page(&sg[index++], pdata[i++], s, pdata_off);
>> +		count -= s;
>> +	}
>> +
>> +	while (count) {
>> +		BUG_ON(index > limit);
>> +		s = min((int)PAGE_SIZE, count);
>> +		sg_set_page(&sg[index++], pdata[i++], s, 0);
>> +		count -= s;
>> +	}
>> +
>> +	return index-start;
>> +}
>> +
>>  /**
>>   * p9_virtio_request - issue a request
>>   * @client: client instance issuing the request
>> @@ -212,22 +245,82 @@ static int p9_virtio_cancel(struct p9_client *client, struct p9_req_t *req)
>>  static int
>>  p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
>>  {
>> -	int in, out;
>> +	int in, out, inp, outp;
>>  	struct virtio_chan *chan = client->trans;
>>  	char *rdata = (char *)req->rc+sizeof(struct p9_fcall);
>>  	unsigned long flags;
>> -	int err;
>> +	size_t pdata_off = 0;
>> +	struct trans_rpage_info *rpinfo = NULL;
>> +	int err, pdata_len = 0;
>>
>>  	P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request\n");
>>
>>  req_retry:
>>  	req->status = REQ_STATUS_SENT;
>>
>> +	if (req->tc->pbuf_size &&
>> +			(req->tc->pubuf && !segment_eq(get_fs(), KERNEL_DS))) {
>> +		int nr_pages = p9_nr_pages(req);
>> +		int rpinfo_size = sizeof(struct trans_rpage_info) +
>> +			sizeof(struct page *) * nr_pages;
>> +
>> +		if (rpinfo_size <= (req->tc->capacity - req->tc->size)) {
>> +			/* We can use sdata */
>> +			req->tc->private = req->tc->sdata + req->tc->size;
>> +			rpinfo = (struct trans_rpage_info *)req->tc->private;
>> +			rpinfo->rp_alloc = 0;
>> +		} else {
>> +			req->tc->private = kmalloc(rpinfo_size, GFP_KERNEL);
>> +			rpinfo = (struct trans_rpage_info *)req->tc->private;
>> +			rpinfo->rp_alloc = 1;
>> +		}
>> +
>> +		err = p9_payload_gup(req, &pdata_off, &pdata_len, nr_pages,
>> +				req->tc->id == P9_TREAD ? 1 : 0);
>> +		if (err < 0) {
>> +			if (rpinfo->rp_alloc)
>> +				kfree(rpinfo);
>> +			return err;
>> +		}
>> +	}
>> +
>>  	spin_lock_irqsave(&chan->lock, flags);
>> -	out = pack_sg_list(chan->sg, 0, VIRTQUEUE_NUM, req->tc->sdata,
>> -								req->tc->size);
>> -	in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM-out, rdata,
>> -								client->msize);
>> +
>> +	/* Handle out VirtIO ring buffers */
>> +	if (req->tc->pbuf_size && (req->tc->id == P9_TWRITE)) {
>> +		/* We have additional write payload buffer to take care */
>> +		out = pack_sg_list(chan->sg, 0, VIRTQUEUE_NUM, req->tc->sdata,
>> +				req->tc->size);
>> +		outp = pack_sg_list_p(chan->sg, out, VIRTQUEUE_NUM,
>> +				pdata_off, rpinfo->rp_data, pdata_len);
>> +		out += outp;
>> +	} else {
>> +		out = pack_sg_list(chan->sg, 0, VIRTQUEUE_NUM, req->tc->sdata,
>> +				req->tc->size);
>> +	}
> 
> How about the following?

Yes, this is something I wanted to do but missed. Good catch.
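
The read ("in") side below could probably get the same treatment; a rough,
untested sketch for the next revision, reusing the same names as in the patch:

	/* Pack the fixed RREAD header first: size[4] + type[1] + tag[2] +
	 * count[4] = 11 bytes, then decide where the payload lands. */
	if (req->tc->pbuf_size && (req->tc->id == P9_TREAD)) {
		in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, rdata, 11);
		if (req->tc->pubuf && !segment_eq(get_fs(), KERNEL_DS)) {
			/* User buffer: pages are already pinned, chain them. */
			in += pack_sg_list_p(chan->sg, out + in, VIRTQUEUE_NUM,
					pdata_off, rpinfo->rp_data, pdata_len);
		} else {
			/* Kernel buffer, e.g. a page-cache read for exec. */
			char *pbuf = req->tc->pubuf ? req->tc->pubuf :
								req->tc->pkbuf;
			in += pack_sg_list(chan->sg, out + in, VIRTQUEUE_NUM,
					pbuf, req->tc->pbuf_size);
		}
	} else {
		in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, rdata,
				client->msize);
	}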

Thanks,
JV

> 
>   out = pack_sg_list(chan->sg, 0, VIRTQUEUE_NUM, req->tc->sdata, req->tc->size);
>   /*
>    * We have an additional write payload buffer to take care of
>    * when zero copy is used and the request is TWRITE.
>    */
>   if (req->tc->pbuf_size && (req->tc->id == P9_TWRITE)) {
>           outp = pack_sg_list_p(chan->sg, out, VIRTQUEUE_NUM,
>                                 pdata_off, rpinfo->rp_data, pdata_len);
>           out += outp;
>   }
> 
> 
>> +
>> +	/* Handle in VirtIO ring buffers */
>> +	if (req->tc->pbuf_size && (req->tc->id == P9_TREAD)) {
>> +		/* We have additional Read payload buffer to take care */
>> +		inp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, rdata, 11);
>> +		/*
>> +		 * Running executables in the filesystem may result in
>> +		 * a read request with kernel buffer as opposed to user buffer.
>> +		 */
>> +		if (req->tc->pubuf && !segment_eq(get_fs(), KERNEL_DS)) {
>> +			in = pack_sg_list_p(chan->sg, out+inp, VIRTQUEUE_NUM,
>> +					pdata_off, rpinfo->rp_data, pdata_len);
>> +		} else {
>> +			char *pbuf = req->tc->pubuf ? req->tc->pubuf :
>> +								req->tc->pkbuf;
>> +			in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM,
>> +					pbuf, req->tc->pbuf_size);
>> +		}
>> +		in += inp;
>> +	} else {
>> +		in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, rdata,
>> +				client->msize);
>> +	}
>>
>>  	err = virtqueue_add_buf(chan->vq, chan->sg, out, in, req->tc);
>>  	if (err < 0) {
>> @@ -246,6 +339,8 @@ req_retry:
>>  			P9_DPRINTK(P9_DEBUG_TRANS,
>>  					"9p debug: "
>>  					"virtio rpc add_buf returned failure");
>> +			if (rpinfo && rpinfo->rp_alloc)
>> +				kfree(rpinfo);
>>  			return -EIO;
>>  		}
>>  	}
> 
> -aneesh
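
P.S. For anyone following along, here is a quick user-space illustration (not
kernel code, just the arithmetic) of how pack_sg_list_p() splits 'count'
payload bytes starting at page offset 'pdata_off' into per-page scatterlist
segments:

	/* User-space illustration of the splitting done by pack_sg_list_p():
	 * a partial first segment, then full pages, then a partial tail. */
	#include <stdio.h>

	#define PAGE_SIZE 4096

	static int count_segments(size_t pdata_off, int count)
	{
		int index = 0;

		if (pdata_off) {
			/* First segment covers the rest of the first page. */
			int s = count < (int)(PAGE_SIZE - pdata_off) ?
					count : (int)(PAGE_SIZE - pdata_off);
			printf("sg[%d]: offset %zu, len %d\n", index++, pdata_off, s);
			count -= s;
		}
		while (count) {
			/* Remaining segments start at offset 0 of each page. */
			int s = count < PAGE_SIZE ? count : PAGE_SIZE;
			printf("sg[%d]: offset 0, len %d\n", index++, s);
			count -= s;
		}
		return index;
	}

	int main(void)
	{
		/* A 9000-byte payload starting 100 bytes into its first page
		 * needs three entries: 3996 + 4096 + 908 bytes. */
		printf("entries used: %d\n", count_segments(100, 9000));
		return 0;
	}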

