Re: [PATCH for-next 1/3] IB/hfi1: Move rvt_cq_wc struct into uapi directory

On Wed, Sep 26, 2018 at 10:02:13AM -0700, Dennis Dalessandro wrote:
> From: Kamenee Arumugam <kamenee.arumugam@xxxxxxxxx>
>
> The rvt_cq_wc struct elements are shared between rdmavt
> and the providers, but the struct is not in the uapi
> directory. As per the comment in the link below, the
> hfi1 driver should pick up such shared structures from
> a uapi header rather than from kernel-only headers.
> https://patchwork.kernel.org/patch/10323739/
>
> Move the rvt_cq_wc struct into the rvt-abi.h header in
> the uapi directory. Create a rvt_k_cq_wc kernel struct
> to separate it from the user version.
>
> Signed-off-by: Kamenee Arumugam <kamenee.arumugam@xxxxxxxxx>
> Reviewed-by: Mike Marciniszyn <mike.marciniszyn@xxxxxxxxx>
> Signed-off-by: Dennis Dalessandro <dennis.dalessandro@xxxxxxxxx>
> ---
>  drivers/infiniband/hw/hfi1/qp.c   |   32 ++++++
>  drivers/infiniband/sw/rdmavt/cq.c |  190 ++++++++++++++++++++++++-------------
>  include/rdma/rdmavt_cq.h          |   10 +-
>  include/uapi/rdma/rvt-abi.h       |   34 +++++++
>  4 files changed, 190 insertions(+), 76 deletions(-)
>  create mode 100644 include/uapi/rdma/rvt-abi.h
>
> diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
> index 54d9ff1..17ccd47 100644
> --- a/drivers/infiniband/hw/hfi1/qp.c
> +++ b/drivers/infiniband/hw/hfi1/qp.c
> @@ -541,6 +541,34 @@ static int qp_idle(struct rvt_qp *qp)
>  }
>
>  /**
> + * ib_cq_tail - Return tail index of cq buffer
> + * @send_cq: The cq for send
> + *
> + * This is called in qp_iter_print to get the tail
> + * of the cq buffer.
> + */
> +static u32 ib_cq_tail(struct ib_cq *send_cq)
> +{
> +	return ibcq_to_rvtcq(send_cq)->ip ?
> +	       ibcq_to_rvtcq(send_cq)->queue->tail :
> +	       ibcq_to_rvtcq(send_cq)->kqueue->tail;
> +}
> +
> +/**
> + * ib_cq_head - Return head index of cq buffer
> + * @send_cq: The cq for send
> + *
> + * This is called in qp_iter_print to get the head
> + * of the cq buffer.
> + */
> +static u32 ib_cq_head(struct ib_cq *send_cq)
> +{
> +	return ibcq_to_rvtcq(send_cq)->ip ?
> +	       ibcq_to_rvtcq(send_cq)->queue->head :
> +	       ibcq_to_rvtcq(send_cq)->kqueue->head;
> +}
> +
> +/**
>   * qp_iter_print - print the qp information to seq_file
>   * @s: the seq_file to emit the qp information on
>   * @iter: the iterator for the qp hash list
> @@ -600,8 +628,8 @@ void qp_iter_print(struct seq_file *s, struct rvt_qp_iter *iter)
>  		   sde ? sde->this_idx : 0,
>  		   send_context,
>  		   send_context ? send_context->sw_index : 0,
> -		   ibcq_to_rvtcq(qp->ibqp.send_cq)->queue->head,
> -		   ibcq_to_rvtcq(qp->ibqp.send_cq)->queue->tail,
> +		   ib_cq_head(qp->ibqp.send_cq),
> +		   ib_cq_tail(qp->ibqp.send_cq),
>  		   qp->pid,
>  		   qp->s_state,
>  		   qp->s_ack_state,
> diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
> index 4f1544a..6406a08 100644
> --- a/drivers/infiniband/sw/rdmavt/cq.c
> +++ b/drivers/infiniband/sw/rdmavt/cq.c
> @@ -1,3 +1,4 @@
> +// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
>  /*
>   * Copyright(c) 2016 - 2018 Intel Corporation.
>   *
> @@ -63,19 +64,33 @@
>   */
>  void rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)
>  {
> -	struct rvt_cq_wc *wc;
> +	struct ib_uverbs_wc *uqueue = NULL;
> +	struct ib_wc *kqueue = NULL;
> +	struct rvt_cq_wc *u_wc = NULL;
> +	struct rvt_k_cq_wc *k_wc = NULL;
>  	unsigned long flags;
>  	u32 head;
>  	u32 next;
> +	u32 tail;
>
>  	spin_lock_irqsave(&cq->lock, flags);
>
> +	if (cq->ip) {
> +		u_wc = cq->queue;
> +		uqueue = &u_wc->uqueue[0];
> +		head = u_wc->head;
> +		tail = u_wc->tail;
> +	} else {
> +		k_wc = cq->kqueue;
> +		kqueue = &k_wc->kqueue[0];
> +		head = k_wc->head;
> +		tail = k_wc->tail;
> +	}
> +
>  	/*
> -	 * Note that the head pointer might be writable by user processes.
> -	 * Take care to verify it is a sane value.
> +	 * Note that the head pointer might be writable by user
> +	 * processes. Take care to verify it is a sane value.
>  	 */
> -	wc = cq->queue;
> -	head = wc->head;
>  	if (head >= (unsigned)cq->ibcq.cqe) {
>  		head = cq->ibcq.cqe;
>  		next = 0;
> @@ -83,7 +98,7 @@ void rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)
>  		next = head + 1;
>  	}
>
> -	if (unlikely(next == wc->tail)) {
> +	if (unlikely(next == tail)) {
>  		spin_unlock_irqrestore(&cq->lock, flags);
>  		if (cq->ibcq.event_handler) {
>  			struct ib_event ev;
> @@ -96,27 +111,28 @@ void rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)
>  		return;
>  	}
>  	trace_rvt_cq_enter(cq, entry, head);
> -	if (cq->ip) {
> -		wc->uqueue[head].wr_id = entry->wr_id;
> -		wc->uqueue[head].status = entry->status;
> -		wc->uqueue[head].opcode = entry->opcode;
> -		wc->uqueue[head].vendor_err = entry->vendor_err;
> -		wc->uqueue[head].byte_len = entry->byte_len;
> -		wc->uqueue[head].ex.imm_data = entry->ex.imm_data;
> -		wc->uqueue[head].qp_num = entry->qp->qp_num;
> -		wc->uqueue[head].src_qp = entry->src_qp;
> -		wc->uqueue[head].wc_flags = entry->wc_flags;
> -		wc->uqueue[head].pkey_index = entry->pkey_index;
> -		wc->uqueue[head].slid = ib_lid_cpu16(entry->slid);
> -		wc->uqueue[head].sl = entry->sl;
> -		wc->uqueue[head].dlid_path_bits = entry->dlid_path_bits;
> -		wc->uqueue[head].port_num = entry->port_num;
> +	if (uqueue) {
> +		uqueue[head].wr_id = entry->wr_id;
> +		uqueue[head].status = entry->status;
> +		uqueue[head].opcode = entry->opcode;
> +		uqueue[head].vendor_err = entry->vendor_err;
> +		uqueue[head].byte_len = entry->byte_len;
> +		uqueue[head].ex.imm_data = entry->ex.imm_data;
> +		uqueue[head].qp_num = entry->qp->qp_num;
> +		uqueue[head].src_qp = entry->src_qp;
> +		uqueue[head].wc_flags = entry->wc_flags;
> +		uqueue[head].pkey_index = entry->pkey_index;
> +		uqueue[head].slid = ib_lid_cpu16(entry->slid);
> +		uqueue[head].sl = entry->sl;
> +		uqueue[head].dlid_path_bits = entry->dlid_path_bits;
> +		uqueue[head].port_num = entry->port_num;
>  		/* Make sure entry is written before the head index. */
>  		smp_wmb();
> +		u_wc->head = next;
>  	} else {
> -		wc->kqueue[head] = *entry;
> +		kqueue[head] = *entry;
> +		k_wc->head = next;
>  	}
> -	wc->head = next;
>
>  	if (cq->notify == IB_CQ_NEXT_COMP ||
>  	    (cq->notify == IB_CQ_SOLICITED &&
> @@ -183,7 +199,8 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
>  {
>  	struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
>  	struct rvt_cq *cq;
> -	struct rvt_cq_wc *wc;
> +	struct rvt_cq_wc *u_wc = NULL;
> +	struct rvt_k_cq_wc *k_wc = NULL;
>  	struct ib_cq *ret;
>  	u32 sz;
>  	unsigned int entries = attr->cqe;
> @@ -212,17 +229,22 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
>  	 * We need to use vmalloc() in order to support mmap and large
>  	 * numbers of entries.
>  	 */
> -	sz = sizeof(*wc);
> -	if (udata && udata->outlen >= sizeof(__u64))
> -		sz += sizeof(struct ib_uverbs_wc) * (entries + 1);
> -	else
> -		sz += sizeof(struct ib_wc) * (entries + 1);
> -	wc = udata ?
> -		vmalloc_user(sz) :
> -		vzalloc_node(sz, rdi->dparms.node);
> -	if (!wc) {
> -		ret = ERR_PTR(-ENOMEM);
> -		goto bail_cq;
> +	if (udata && udata->outlen >= sizeof(__u64)) {
> +		sz = sizeof(struct ib_uverbs_wc) * (entries + 1);
> +		sz += sizeof(*u_wc);
> +		u_wc = vmalloc_user(sz);
> +		if (!u_wc) {
> +			ret = ERR_PTR(-ENOMEM);
> +			goto bail_cq;
> +		}
> +	} else {
> +		sz = sizeof(struct ib_wc) * (entries + 1);
> +		sz += sizeof(*k_wc);
> +		k_wc = vzalloc_node(sz, rdi->dparms.node);
> +		if (!k_wc) {
> +			ret = ERR_PTR(-ENOMEM);
> +			goto bail_cq;
> +		}
>  	}
>
>  	/*
> @@ -232,7 +254,7 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
>  	if (udata && udata->outlen >= sizeof(__u64)) {
>  		int err;
>
> -		cq->ip = rvt_create_mmap_info(rdi, sz, context, wc);
> +		cq->ip = rvt_create_mmap_info(rdi, sz, context, u_wc);
>  		if (!cq->ip) {
>  			ret = ERR_PTR(-ENOMEM);
>  			goto bail_wc;
> @@ -279,7 +301,10 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
>  	cq->notify = RVT_CQ_NONE;
>  	spin_lock_init(&cq->lock);
>  	INIT_WORK(&cq->comptask, send_complete);
> -	cq->queue = wc;
> +	if (u_wc)
> +		cq->queue = u_wc;
> +	else
> +		cq->kqueue = k_wc;
>
>  	ret = &cq->ibcq;
>
> @@ -289,7 +314,8 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
>  bail_ip:
>  	kfree(cq->ip);
>  bail_wc:
> -	vfree(wc);
> +	vfree(u_wc);
> +	vfree(k_wc);
>  bail_cq:
>  	kfree(cq);
>  done:
> @@ -346,9 +372,15 @@ int rvt_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
>  	if (cq->notify != IB_CQ_NEXT_COMP)
>  		cq->notify = notify_flags & IB_CQ_SOLICITED_MASK;
>
> -	if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
> -	    cq->queue->head != cq->queue->tail)
> -		ret = 1;
> +	if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
> +		if (cq->queue) {
> +			if (cq->queue->head != cq->queue->tail)
> +				ret = 1;
> +		} else {
> +			if (cq->kqueue->head != cq->kqueue->tail)
> +				ret = 1;
> +		}
> +	}
>
>  	spin_unlock_irqrestore(&cq->lock, flags);
>
> @@ -364,12 +396,14 @@ int rvt_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
>  int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
>  {
>  	struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
> -	struct rvt_cq_wc *old_wc;
> -	struct rvt_cq_wc *wc;
>  	u32 head, tail, n;
>  	int ret;
>  	u32 sz;
>  	struct rvt_dev_info *rdi = cq->rdi;
> +	struct rvt_cq_wc *u_wc = NULL;
> +	struct rvt_cq_wc *old_u_wc = NULL;
> +	struct rvt_k_cq_wc *k_wc = NULL;
> +	struct rvt_k_cq_wc *old_k_wc = NULL;
>
>  	if (cqe < 1 || cqe > rdi->dparms.props.max_cqe)
>  		return -EINVAL;
> @@ -377,17 +411,19 @@ int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
>  	/*
>  	 * Need to use vmalloc() if we want to support large #s of entries.
>  	 */
> -	sz = sizeof(*wc);
> -	if (udata && udata->outlen >= sizeof(__u64))
> -		sz += sizeof(struct ib_uverbs_wc) * (cqe + 1);
> -	else
> -		sz += sizeof(struct ib_wc) * (cqe + 1);
> -	wc = udata ?
> -		vmalloc_user(sz) :
> -		vzalloc_node(sz, rdi->dparms.node);
> -	if (!wc)
> -		return -ENOMEM;
> -
> +	if (udata && udata->outlen >= sizeof(__u64)) {
> +		sz = sizeof(struct ib_uverbs_wc) * (cqe + 1);
> +		sz += sizeof(*u_wc);
> +		u_wc = vmalloc_user(sz);
> +		if (!u_wc)
> +			return -ENOMEM;
> +	} else {
> +		sz = sizeof(struct ib_wc) * (cqe + 1);
> +		sz += sizeof(*k_wc);
> +		k_wc = vzalloc_node(sz, rdi->dparms.node);
> +		if (!k_wc)
> +			return -ENOMEM;
> +	}
>  	/* Check that we can write the offset to mmap. */
>  	if (udata && udata->outlen >= sizeof(__u64)) {
>  		__u64 offset = 0;
> @@ -402,11 +438,18 @@ int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
>  	 * Make sure head and tail are sane since they
>  	 * might be user writable.
>  	 */
> -	old_wc = cq->queue;
> -	head = old_wc->head;
> +	if (udata && udata->outlen >= sizeof(__u64)) {
> +		old_u_wc = cq->queue;
> +		head = old_u_wc->head;
> +		tail = old_u_wc->tail;
> +	} else {
> +		old_k_wc = cq->kqueue;
> +		head = old_k_wc->head;
> +		tail = old_k_wc->tail;
> +	}
> +
>  	if (head > (u32)cq->ibcq.cqe)
>  		head = (u32)cq->ibcq.cqe;
> -	tail = old_wc->tail;
>  	if (tail > (u32)cq->ibcq.cqe)
>  		tail = (u32)cq->ibcq.cqe;
>  	if (head < tail)
> @@ -418,27 +461,36 @@ int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
>  		goto bail_unlock;
>  	}
>  	for (n = 0; tail != head; n++) {
> -		if (cq->ip)
> -			wc->uqueue[n] = old_wc->uqueue[tail];
> +		if (udata && udata->outlen >= sizeof(__u64))
> +			u_wc->uqueue[n] = old_u_wc->uqueue[tail];
>  		else
> -			wc->kqueue[n] = old_wc->kqueue[tail];
> +			k_wc->kqueue[n] = old_k_wc->kqueue[tail];
>  		if (tail == (u32)cq->ibcq.cqe)
>  			tail = 0;
>  		else
>  			tail++;
>  	}
>  	cq->ibcq.cqe = cqe;
> -	wc->head = n;
> -	wc->tail = 0;
> -	cq->queue = wc;
> +	if (u_wc) {
> +		u_wc->head = n;
> +		u_wc->tail = 0;
> +		cq->queue = u_wc;
> +	} else {
> +		k_wc->head = n;
> +		k_wc->tail = 0;
> +		cq->kqueue = k_wc;
> +	}
>  	spin_unlock_irq(&cq->lock);
>
> -	vfree(old_wc);
> +	if (u_wc)
> +		vfree(old_u_wc);
> +	else
> +		vfree(old_k_wc);
>
>  	if (cq->ip) {
>  		struct rvt_mmap_info *ip = cq->ip;
>
> -		rvt_update_mmap_info(rdi, ip, sz, wc);
> +		rvt_update_mmap_info(rdi, ip, sz, u_wc);
>
>  		/*
>  		 * Return the offset to mmap.
> @@ -462,7 +514,9 @@ int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
>  bail_unlock:
>  	spin_unlock_irq(&cq->lock);
>  bail_free:
> -	vfree(wc);
> +	vfree(u_wc);
> +	vfree(k_wc);
> +
>  	return ret;
>  }
>
> @@ -480,7 +534,7 @@ int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
>  int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
>  {
>  	struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
> -	struct rvt_cq_wc *wc;
> +	struct rvt_k_cq_wc *wc;
>  	unsigned long flags;
>  	int npolled;
>  	u32 tail;
> @@ -491,7 +545,7 @@ int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
>
>  	spin_lock_irqsave(&cq->lock, flags);
>
> -	wc = cq->queue;
> +	wc = cq->kqueue;
>  	tail = wc->tail;
>  	if (tail > (u32)cq->ibcq.cqe)
>  		tail = (u32)cq->ibcq.cqe;
> diff --git a/include/rdma/rdmavt_cq.h b/include/rdma/rdmavt_cq.h
> index 75dc65c..9f25be0 100644
> --- a/include/rdma/rdmavt_cq.h
> +++ b/include/rdma/rdmavt_cq.h
> @@ -59,20 +59,17 @@
>   * notifications are armed.
>   */
>  #define RVT_CQ_NONE      (IB_CQ_NEXT_COMP + 1)
> +#include <rdma/rvt-abi.h>
>
>  /*
>   * This structure is used to contain the head pointer, tail pointer,
>   * and completion queue entries as a single memory allocation so
>   * it can be mmap'ed into user space.
>   */
> -struct rvt_cq_wc {
> +struct rvt_k_cq_wc {
>  	u32 head;               /* index of next entry to fill */
>  	u32 tail;               /* index of next ib_poll_cq() entry */
> -	union {
> -		/* these are actually size ibcq.cqe + 1 */
> -		struct ib_uverbs_wc uqueue[0];
> -		struct ib_wc kqueue[0];
> -	};
> +	struct ib_wc kqueue[0];
>  };
>
>  /*
> @@ -88,6 +85,7 @@ struct rvt_cq {
>  	struct rvt_dev_info *rdi;
>  	struct rvt_cq_wc *queue;
>  	struct rvt_mmap_info *ip;
> +	struct rvt_k_cq_wc *kqueue;
>  };
>
>  static inline struct rvt_cq *ibcq_to_rvtcq(struct ib_cq *ibcq)
> diff --git a/include/uapi/rdma/rvt-abi.h b/include/uapi/rdma/rvt-abi.h
> new file mode 100644
> index 0000000..1bcb346
> --- /dev/null
> +++ b/include/uapi/rdma/rvt-abi.h
> @@ -0,0 +1,34 @@
> +/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */

I think it should be the OpenIB license.
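
For comparison, the existing headers under include/uapi/rdma carry the
dual GPL/Linux-OpenIB identifier, i.e. something like:

	/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */

assuming this new header is meant to follow the same convention as its
neighbors.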

> +
> +/*
> + * Copyright(c) 2015 - 2018 Intel Corporation.
> + */
> +
> +/*
> + * This file contains defines, structures, etc. that are used
> + * to communicate between kernel and user code.
> + */
> +
> +#ifndef RVT_ABI_USER_H
> +#define RVT_ABI_USER_H
> +
> +#include <linux/types.h>
> +#ifndef RDMA_ATOMIC_UAPI

Where is it defined? I didn't find it in the kernel.
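
If the plan is for user space (rdma-core) to supply the real definition
before including this header, I would expect something along these
lines on the provider side (a sketch only, using C11 atomics, not an
existing definition):

	/* hypothetical rdma-core counterpart, built as C11 */
	#include <stdatomic.h>
	#define RDMA_ATOMIC_UAPI(_type, _name) _Atomic(_type) _name

Otherwise the fallback below only declares a plain member and gives
user space no atomicity guarantee for the shared head/tail indices.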

> +#define RDMA_ATOMIC_UAPI(_type, _name) _type _name
> +#endif
> +/*
> + * This structure is used to contain the head pointer, tail pointer,
> + * and completion queue entries as a single memory allocation so
> + * it can be mmap'ed into user space.
> + */
> +struct rvt_cq_wc {
> +	/* index of next entry to fill */
> +	RDMA_ATOMIC_UAPI(__u32, head);
> +	/* index of next ib_poll_cq() entry */
> +	RDMA_ATOMIC_UAPI(__u32, tail);
> +
> +	/* these are actually size ibcq.cqe + 1 */
> +	struct ib_uverbs_wc uqueue[0];
               ^^^^^^^^^^^^
You need to add the proper include for it.
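
struct ib_uverbs_wc lives in the uapi verbs header, so presumably the
top of this file needs:

	#include <rdma/ib_user_verbs.h>

next to the existing <linux/types.h> include.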

> +};
> +
> +#endif /* RVT_ABI_USER_H */
>
