Re: [PATCH for-next 1/1] IB/{hw,sw}: remove 'uobject->context' dependency in object creation APIs

Hi Shamir,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on rdma/for-next]
[also build test WARNING on next-20190116]
[cannot apply to v5.0-rc2]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url:    https://github.com/0day-ci/linux/commits/Shamir-Rabinovitch/IB-hw-sw-remove-uobject-context-dependency-in-object-creation-APIs/20190118-132047
base:   https://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma.git for-next
config: x86_64-allmodconfig (attached as .config)
compiler: gcc-8 (Debian 8.2.0-14) 8.2.0
reproduce:
        # save the attached .config to linux build tree
        make ARCH=x86_64 

All warnings (new ones prefixed by >>):

   drivers/infiniband//sw/rdmavt/qp.c: In function 'rvt_create_qp':
>> drivers/infiniband//sw/rdmavt/qp.c:1133:9: warning: assignment to 'struct ib_qp *' from 'long int' makes pointer from integer without a cast [-Wint-conversion]
        ret = PTR_ERR(context);
            ^
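
The warning flags the error-pointer handoff in rvt_create_qp(): PTR_ERR() yields a long, but 'ret' is a struct ib_qp *. A minimal sketch of one way to resolve it, assuming the intent is simply to propagate the error returned by the patch's rdma_get_ucontext() helper, would be to convert with ERR_CAST() instead:

	context = rdma_get_ucontext(udata);
	if (IS_ERR(context)) {
		/* ERR_CAST() keeps the ERR_PTR encoding while changing the pointer type */
		ret = ERR_CAST(context);
		goto bail_qpn;
	}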

vim +1133 drivers/infiniband//sw/rdmavt/qp.c

   932	
   933	/**
   934	 * rvt_create_qp - create a queue pair for a device
   935	 * @ibpd: the protection domain whose device we create the queue pair for
   936	 * @init_attr: the attributes of the queue pair
   937	 * @udata: user data for libibverbs.so
   938	 *
   939	 * Queue pair creation is mostly an rvt issue. However, drivers have their own
   940	 * unique idea of what queue pair numbers mean. For instance there is a reserved
   941	 * range for PSM.
   942	 *
   943	 * Return: the queue pair on success, otherwise returns an errno.
   944	 *
   945	 * Called by the ib_create_qp() core verbs function.
   946	 */
   947	struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
   948				    struct ib_qp_init_attr *init_attr,
   949				    struct ib_udata *udata)
   950	{
   951		struct ib_ucontext *context;
   952		struct rvt_qp *qp;
   953		int err;
   954		struct rvt_swqe *swq = NULL;
   955		size_t sz;
   956		size_t sg_list_sz;
   957		struct ib_qp *ret = ERR_PTR(-ENOMEM);
   958		struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
   959		void *priv = NULL;
   960		size_t sqsize;
   961	
   962		if (!rdi)
   963			return ERR_PTR(-EINVAL);
   964	
   965		if (init_attr->cap.max_send_sge > rdi->dparms.props.max_send_sge ||
   966		    init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr ||
   967		    init_attr->create_flags)
   968			return ERR_PTR(-EINVAL);
   969	
   970		/* Check receive queue parameters if no SRQ is specified. */
   971		if (!init_attr->srq) {
   972			if (init_attr->cap.max_recv_sge >
   973			    rdi->dparms.props.max_recv_sge ||
   974			    init_attr->cap.max_recv_wr > rdi->dparms.props.max_qp_wr)
   975				return ERR_PTR(-EINVAL);
   976	
   977			if (init_attr->cap.max_send_sge +
   978			    init_attr->cap.max_send_wr +
   979			    init_attr->cap.max_recv_sge +
   980			    init_attr->cap.max_recv_wr == 0)
   981				return ERR_PTR(-EINVAL);
   982		}
   983		sqsize =
   984			init_attr->cap.max_send_wr + 1 +
   985			rdi->dparms.reserved_operations;
   986		switch (init_attr->qp_type) {
   987		case IB_QPT_SMI:
   988		case IB_QPT_GSI:
   989			if (init_attr->port_num == 0 ||
   990			    init_attr->port_num > ibpd->device->phys_port_cnt)
   991				return ERR_PTR(-EINVAL);
   992			/* fall through */
   993		case IB_QPT_UC:
   994		case IB_QPT_RC:
   995		case IB_QPT_UD:
   996			sz = sizeof(struct rvt_sge) *
   997				init_attr->cap.max_send_sge +
   998				sizeof(struct rvt_swqe);
   999			swq = vzalloc_node(array_size(sz, sqsize), rdi->dparms.node);
  1000			if (!swq)
  1001				return ERR_PTR(-ENOMEM);
  1002	
  1003			sz = sizeof(*qp);
  1004			sg_list_sz = 0;
  1005			if (init_attr->srq) {
  1006				struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq);
  1007	
  1008				if (srq->rq.max_sge > 1)
  1009					sg_list_sz = sizeof(*qp->r_sg_list) *
  1010						(srq->rq.max_sge - 1);
  1011			} else if (init_attr->cap.max_recv_sge > 1)
  1012				sg_list_sz = sizeof(*qp->r_sg_list) *
  1013					(init_attr->cap.max_recv_sge - 1);
  1014			qp = kzalloc_node(sz + sg_list_sz, GFP_KERNEL,
  1015					  rdi->dparms.node);
  1016			if (!qp)
  1017				goto bail_swq;
  1018	
  1019			RCU_INIT_POINTER(qp->next, NULL);
  1020			if (init_attr->qp_type == IB_QPT_RC) {
  1021				qp->s_ack_queue =
  1022					kcalloc_node(rvt_max_atomic(rdi),
  1023						     sizeof(*qp->s_ack_queue),
  1024						     GFP_KERNEL,
  1025						     rdi->dparms.node);
  1026				if (!qp->s_ack_queue)
  1027					goto bail_qp;
  1028			}
  1029			/* initialize timers needed for rc qp */
  1030			timer_setup(&qp->s_timer, rvt_rc_timeout, 0);
  1031			hrtimer_init(&qp->s_rnr_timer, CLOCK_MONOTONIC,
  1032				     HRTIMER_MODE_REL);
  1033			qp->s_rnr_timer.function = rvt_rc_rnr_retry;
  1034	
  1035			/*
   1036		 * Driver needs to set up its private QP structure and do any
  1037			 * initialization that is needed.
  1038			 */
  1039			priv = rdi->driver_f.qp_priv_alloc(rdi, qp);
  1040			if (IS_ERR(priv)) {
  1041				ret = priv;
  1042				goto bail_qp;
  1043			}
  1044			qp->priv = priv;
  1045			qp->timeout_jiffies =
  1046				usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
  1047					1000UL);
  1048			if (init_attr->srq) {
  1049				sz = 0;
  1050			} else {
  1051				qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
  1052				qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
  1053				sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
  1054					sizeof(struct rvt_rwqe);
  1055				if (udata)
  1056					qp->r_rq.wq = vmalloc_user(
  1057							sizeof(struct rvt_rwq) +
  1058							qp->r_rq.size * sz);
  1059				else
  1060					qp->r_rq.wq = vzalloc_node(
  1061							sizeof(struct rvt_rwq) +
  1062							qp->r_rq.size * sz,
  1063							rdi->dparms.node);
  1064				if (!qp->r_rq.wq)
  1065					goto bail_driver_priv;
  1066			}
  1067	
  1068			/*
  1069			 * ib_create_qp() will initialize qp->ibqp
  1070			 * except for qp->ibqp.qp_num.
  1071			 */
  1072			spin_lock_init(&qp->r_lock);
  1073			spin_lock_init(&qp->s_hlock);
  1074			spin_lock_init(&qp->s_lock);
  1075			spin_lock_init(&qp->r_rq.lock);
  1076			atomic_set(&qp->refcount, 0);
  1077			atomic_set(&qp->local_ops_pending, 0);
  1078			init_waitqueue_head(&qp->wait);
  1079			INIT_LIST_HEAD(&qp->rspwait);
  1080			qp->state = IB_QPS_RESET;
  1081			qp->s_wq = swq;
  1082			qp->s_size = sqsize;
  1083			qp->s_avail = init_attr->cap.max_send_wr;
  1084			qp->s_max_sge = init_attr->cap.max_send_sge;
  1085			if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
  1086				qp->s_flags = RVT_S_SIGNAL_REQ_WR;
  1087	
  1088			err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
  1089					init_attr->qp_type,
  1090					init_attr->port_num);
  1091			if (err < 0) {
  1092				ret = ERR_PTR(err);
  1093				goto bail_rq_wq;
  1094			}
  1095			qp->ibqp.qp_num = err;
  1096			qp->port_num = init_attr->port_num;
  1097			rvt_init_qp(rdi, qp, init_attr->qp_type);
  1098			if (rdi->driver_f.qp_priv_init) {
  1099				err = rdi->driver_f.qp_priv_init(rdi, qp, init_attr);
  1100				if (err) {
  1101					ret = ERR_PTR(err);
  1102					goto bail_rq_wq;
  1103				}
  1104			}
  1105			break;
  1106	
  1107		default:
  1108			/* Don't support raw QPs */
  1109			return ERR_PTR(-EINVAL);
  1110		}
  1111	
  1112		init_attr->cap.max_inline_data = 0;
  1113	
  1114		/*
  1115		 * Return the address of the RWQ as the offset to mmap.
  1116		 * See rvt_mmap() for details.
  1117		 */
  1118		if (udata && udata->outlen >= sizeof(__u64)) {
  1119			if (!qp->r_rq.wq) {
  1120				__u64 offset = 0;
  1121	
  1122				err = ib_copy_to_udata(udata, &offset,
  1123						       sizeof(offset));
  1124				if (err) {
  1125					ret = ERR_PTR(err);
  1126					goto bail_qpn;
  1127				}
  1128			} else {
  1129				u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;
  1130	
  1131				context = rdma_get_ucontext(udata);
  1132				if (IS_ERR(context)) {
> 1133					ret = PTR_ERR(context);
  1134					goto bail_qpn;
  1135				}
  1136	
  1137				qp->ip = rvt_create_mmap_info(rdi, s, context,
  1138							      qp->r_rq.wq);
  1139				if (!qp->ip) {
  1140					ret = ERR_PTR(-ENOMEM);
  1141					goto bail_qpn;
  1142				}
  1143	
  1144				err = ib_copy_to_udata(udata, &qp->ip->offset,
  1145						       sizeof(qp->ip->offset));
  1146				if (err) {
  1147					ret = ERR_PTR(err);
  1148					goto bail_ip;
  1149				}
  1150			}
  1151			qp->pid = current->pid;
  1152		}
  1153	
  1154		spin_lock(&rdi->n_qps_lock);
  1155		if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
  1156			spin_unlock(&rdi->n_qps_lock);
  1157			ret = ERR_PTR(-ENOMEM);
  1158			goto bail_ip;
  1159		}
  1160	
  1161		rdi->n_qps_allocated++;
  1162		/*
  1163		 * Maintain a busy_jiffies variable that will be added to the timeout
  1164		 * period in mod_retry_timer and add_retry_timer. This busy jiffies
  1165		 * is scaled by the number of rc qps created for the device to reduce
  1166		 * the number of timeouts occurring when there is a large number of
  1167		 * qps. busy_jiffies is incremented every rc qp scaling interval.
  1168		 * The scaling interval is selected based on extensive performance
  1169		 * evaluation of targeted workloads.
  1170		 */
  1171		if (init_attr->qp_type == IB_QPT_RC) {
  1172			rdi->n_rc_qps++;
  1173			rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
  1174		}
  1175		spin_unlock(&rdi->n_qps_lock);
  1176	
  1177		if (qp->ip) {
  1178			spin_lock_irq(&rdi->pending_lock);
  1179			list_add(&qp->ip->pending_mmaps, &rdi->pending_mmaps);
  1180			spin_unlock_irq(&rdi->pending_lock);
  1181		}
  1182	
  1183		ret = &qp->ibqp;
  1184	
  1185		/*
   1186		 * We have our QP and it's good, now keep track of what types of opcodes
  1187		 * can be processed on this QP. We do this by keeping track of what the
  1188		 * 3 high order bits of the opcode are.
  1189		 */
  1190		switch (init_attr->qp_type) {
  1191		case IB_QPT_SMI:
  1192		case IB_QPT_GSI:
  1193		case IB_QPT_UD:
  1194			qp->allowed_ops = IB_OPCODE_UD;
  1195			break;
  1196		case IB_QPT_RC:
  1197			qp->allowed_ops = IB_OPCODE_RC;
  1198			break;
  1199		case IB_QPT_UC:
  1200			qp->allowed_ops = IB_OPCODE_UC;
  1201			break;
  1202		default:
  1203			ret = ERR_PTR(-EINVAL);
  1204			goto bail_ip;
  1205		}
  1206	
  1207		return ret;
  1208	
  1209	bail_ip:
  1210		if (qp->ip)
  1211			kref_put(&qp->ip->ref, rvt_release_mmap_info);
  1212	
  1213	bail_qpn:
  1214		rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
  1215	
  1216	bail_rq_wq:
  1217		if (!qp->ip)
  1218			vfree(qp->r_rq.wq);
  1219	
  1220	bail_driver_priv:
  1221		rdi->driver_f.qp_priv_free(rdi, qp);
  1222	
  1223	bail_qp:
  1224		kfree(qp->s_ack_queue);
  1225		kfree(qp);
  1226	
  1227	bail_swq:
  1228		vfree(swq);
  1229	
  1230		return ret;
  1231	}
  1232	
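For context, rvt_create_qp() is reached through the ib_create_qp() core verbs entry point (see the kernel-doc above), so callers only test the returned pointer with IS_ERR(); an errno therefore has to be returned ERR_PTR-encoded rather than as a bare integer. A rough, hypothetical caller sketch (the 'pd' and 'init_attr' names are illustrative only):

	/* Hypothetical ULP-style caller, for illustration only. */
	struct ib_qp *qp;

	qp = ib_create_qp(pd, &init_attr);
	if (IS_ERR(qp))
		return PTR_ERR(qp);	/* the error travels as an ERR_PTR up to this point */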

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation

Attachment: .config.gz
Description: application/gzip

