Hello Long Li,

This is a semi-automatic email about new static checker warnings.

The patch 6dce3468a04c: "RDMA/mana_ib: Add a driver for Microsoft Azure
Network Adapter" from Sep 20, 2022, leads to the following Smatch
complaint:

    drivers/infiniband/hw/mana/qp.c:221 mana_ib_create_qp_rss()
    warn: variable dereferenced before check 'udata' (see line 115)

drivers/infiniband/hw/mana/qp.c
   114
   115          if (udata->inlen < sizeof(ucmd))
                    ^^^^^^^^^^^^
This code assumes "udata" is non-NULL

   116                  return -EINVAL;
   117
   118          ret = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
   119          if (ret) {
   120                  ibdev_dbg(&mdev->ib_dev,
   121                            "Failed copy from udata for create rss-qp, err %d\n",
   122                            ret);
   123                  return -EFAULT;
   124          }
   125
   126          if (attr->cap.max_recv_wr > MAX_SEND_BUFFERS_PER_QUEUE) {
   127                  ibdev_dbg(&mdev->ib_dev,
   128                            "Requested max_recv_wr %d exceeding limit.\n",
   129                            attr->cap.max_recv_wr);
   130                  return -EINVAL;
   131          }
   132
   133          if (attr->cap.max_recv_sge > MAX_RX_WQE_SGL_ENTRIES) {
   134                  ibdev_dbg(&mdev->ib_dev,
   135                            "Requested max_recv_sge %d exceeding limit.\n",
   136                            attr->cap.max_recv_sge);
   137                  return -EINVAL;
   138          }
   139
   140          if (ucmd.rx_hash_function != MANA_IB_RX_HASH_FUNC_TOEPLITZ) {
   141                  ibdev_dbg(&mdev->ib_dev,
   142                            "RX Hash function is not supported, %d\n",
   143                            ucmd.rx_hash_function);
   144                  return -EINVAL;
   145          }
   146
   147          /* IB ports start with 1, MANA start with 0 */
   148          port = ucmd.port;
   149          if (port < 1 || port > mc->num_ports) {
   150                  ibdev_dbg(&mdev->ib_dev, "Invalid port %u in creating qp\n",
   151                            port);
   152                  return -EINVAL;
   153          }
   154          ndev = mc->ports[port - 1];
   155          mpc = netdev_priv(ndev);
   156
   157          ibdev_dbg(&mdev->ib_dev, "rx_hash_function %d port %d\n",
   158                    ucmd.rx_hash_function, port);
   159
   160          mana_ind_table = kzalloc(sizeof(mana_handle_t) *
   161                                   (1 << ind_tbl->log_ind_tbl_size),
   162                                   GFP_KERNEL);
   163          if (!mana_ind_table) {
   164                  ret = -ENOMEM;
   165                  goto fail;
   166          }
   167
   168          qp->port = port;
   169
   170          for (i = 0; i < (1 << ind_tbl->log_ind_tbl_size); i++) {
   171                  struct mana_obj_spec wq_spec = {};
   172                  struct mana_obj_spec cq_spec = {};
   173
   174                  ibwq = ind_tbl->ind_tbl[i];
   175                  wq = container_of(ibwq, struct mana_ib_wq, ibwq);
   176
   177                  ibcq = ibwq->cq;
   178                  cq = container_of(ibcq, struct mana_ib_cq, ibcq);
   179
   180                  wq_spec.gdma_region = wq->gdma_region;
   181                  wq_spec.queue_size = wq->wq_buf_size;
   182
   183                  cq_spec.gdma_region = cq->gdma_region;
   184                  cq_spec.queue_size = cq->cqe * COMP_ENTRY_SIZE;
   185                  cq_spec.modr_ctx_id = 0;
   186                  cq_spec.attached_eq = GDMA_CQ_NO_EQ;
   187
   188                  ret = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_RQ,
   189                                           &wq_spec, &cq_spec, &wq->rx_object);
   190                  if (ret)
   191                          goto fail;
   192
   193                  /* The GDMA regions are now owned by the WQ object */
   194                  wq->gdma_region = GDMA_INVALID_DMA_REGION;
   195                  cq->gdma_region = GDMA_INVALID_DMA_REGION;
   196
   197                  wq->id = wq_spec.queue_index;
   198                  cq->id = cq_spec.queue_index;
   199
   200                  ibdev_dbg(&mdev->ib_dev,
   201                            "ret %d rx_object 0x%llx wq id %llu cq id %llu\n",
   202                            ret, wq->rx_object, wq->id, cq->id);
   203
   204                  resp.entries[i].cqid = cq->id;
   205                  resp.entries[i].wqid = wq->id;
   206
   207                  mana_ind_table[i] = wq->rx_object;
   208          }
   209          resp.num_entries = i;
   210
   211          ret = mana_ib_cfg_vport_steering(mdev, ndev, wq->rx_object,
   212                                           mana_ind_table,
   213                                           ind_tbl->log_ind_tbl_size,
   214                                           ucmd.rx_hash_key_len,
   215                                           ucmd.rx_hash_key);
   216          if (ret)
   217                  goto fail;
   218
   219          kfree(mana_ind_table);
   220
   221          if (udata) {
                    ^^^^^
Can it be NULL?

   222                  ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
   223                  if (ret) {

regards,
dan carpenter
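
P.S. I don't know off-hand whether mana_ib_create_qp_rss() can actually be
reached with a NULL udata, so take the following only as a sketch of the two
ways this class of warning usually gets resolved. If udata can be NULL, the
early dereference on line 115 needs a guard, roughly:

        /* Sketch only, not a tested patch: reject a NULL udata up front
         * so the dereference of udata->inlen agrees with the later
         * "if (udata)" check.
         */
        if (!udata || udata->inlen < sizeof(ucmd))
                return -EINVAL;

and then the "if (udata)" wrapper around ib_copy_to_udata() on line 221
becomes unnecessary. If instead udata is guaranteed non-NULL on this path,
deleting the check on line 221 is the smaller fix. Either way, the
dereference and the check should not disagree.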