Hello Adit Ranadive,

The patch 29c8d9eba550: "IB: Add vmw_pvrdma driver" from Oct 2, 2016,
leads to the following Smatch static checker warning:

	drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c:712 pvrdma_post_send()
	warn: unsigned 'wr->opcode' is never less than zero.

drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
   671  int pvrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
   672                       const struct ib_send_wr **bad_wr)
   673  {
   674          struct pvrdma_qp *qp = to_vqp(ibqp);
   675          struct pvrdma_dev *dev = to_vdev(ibqp->device);
   676          unsigned long flags;
   677          struct pvrdma_sq_wqe_hdr *wqe_hdr;
   678          struct pvrdma_sge *sge;
   679          int i, ret;
   680
   681          /*
   682           * In states lower than RTS, we can fail immediately. In other states,
   683           * just post and let the device figure it out.
   684           */
   685          if (qp->state < IB_QPS_RTS) {
   686                  *bad_wr = wr;
   687                  return -EINVAL;
   688          }
   689
   690          spin_lock_irqsave(&qp->sq.lock, flags);
   691
   692          while (wr) {
   693                  unsigned int tail = 0;
   694
   695                  if (unlikely(!pvrdma_idx_ring_has_space(
   696                                  qp->sq.ring, qp->sq.wqe_cnt, &tail))) {
   697                          dev_warn_ratelimited(&dev->pdev->dev,
   698                                               "send queue is full\n");
   699                          *bad_wr = wr;
   700                          ret = -ENOMEM;
   701                          goto out;
   702                  }
   703
   704                  if (unlikely(wr->num_sge > qp->sq.max_sg || wr->num_sge < 0)) {
   705                          dev_warn_ratelimited(&dev->pdev->dev,
   706                                               "send SGE overflow\n");
   707                          *bad_wr = wr;
   708                          ret = -EINVAL;
   709                          goto out;
   710                  }
   711
--> 712                  if (unlikely(wr->opcode < 0)) {
                                      ^^^^^^^^^^
wr->opcode is an enum and enum signedness is implementation-defined in C,
but in this context it's unsigned.  Just checking for negatives seems
insufficient here anyway.  Perhaps the check can be deleted?

   713                          dev_warn_ratelimited(&dev->pdev->dev,
   714                                               "invalid send opcode\n");
   715                          *bad_wr = wr;
   716                          ret = -EINVAL;
   717                          goto out;
   718                  }
   719
   720                  /*
   721                   * Only support UD, RC.
   722                   * Need to check opcode table for thorough checking.
   723                   * opcode                       _UD _UC _RC
   724                   * _SEND                        x   x   x
   725                   * _SEND_WITH_IMM               x   x   x
   726                   * _RDMA_WRITE                      x   x
   727                   * _RDMA_WRITE_WITH_IMM             x   x
   728                   * _LOCAL_INV                       x   x
   729                   * _SEND_WITH_INV                   x   x
   730                   * _RDMA_READ                           x
   731                   * _ATOMIC_CMP_AND_SWP                  x
   732                   * _ATOMIC_FETCH_AND_ADD                x
   733                   * _MASK_ATOMIC_CMP_AND_SWP             x
   734                   * _MASK_ATOMIC_FETCH_AND_ADD           x
   735                   * _REG_MR                              x
   736                   *
   737                   */
   738                  if (qp->ibqp.qp_type != IB_QPT_UD &&
   739                      qp->ibqp.qp_type != IB_QPT_RC &&
   740                      wr->opcode != IB_WR_SEND) {

regards,
dan carpenter
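
P.S. For anyone less familiar with the enum signedness issue, here is a
small standalone sketch (not driver code; the enum and helper names are
made up for the example).  With no negative enumerators, GCC gives the
enum an unsigned compatible type, so the "< 0" branch can never be taken
and checkers report it as dead code; only an explicit upper-bound (or
table-based) check actually rejects bogus opcode values:

	#include <stdio.h>

	enum example_opcode {
		EXAMPLE_SEND,
		EXAMPLE_RDMA_WRITE,
		EXAMPLE_RDMA_READ,
		EXAMPLE_OPCODE_MAX,
	};

	static int validate(enum example_opcode op)
	{
		/*
		 * No negative enumerators, so GCC picks an unsigned
		 * compatible type and this comparison is always false.
		 * Smatch (and gcc -Wtype-limits) flag it as dead code.
		 */
		if (op < 0)
			return -1;

		/* The upper-bound check is what rejects garbage values. */
		if (op >= EXAMPLE_OPCODE_MAX)
			return -1;

		return 0;
	}

	int main(void)
	{
		/* Callers can still pass an out-of-range value via a cast. */
		printf("%d\n", validate((enum example_opcode)42));  /* -1 */
		printf("%d\n", validate(EXAMPLE_SEND));             /* 0 */
		return 0;
	}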