sparse warnings: (new ones prefixed by >>)
drivers/block/virtio_blk.c:238:24: sparse: sparse: incorrect type in return expression (different base types) @@ expected int @@ got restricted blk_status_t [usertype] @@
drivers/block/virtio_blk.c:238:24: sparse: expected int
drivers/block/virtio_blk.c:238:24: sparse: got restricted blk_status_t [usertype]
drivers/block/virtio_blk.c:246:32: sparse: sparse: incorrect type in return expression (different base types) @@ expected int @@ got restricted blk_status_t [usertype] @@
drivers/block/virtio_blk.c:246:32: sparse: expected int
drivers/block/virtio_blk.c:246:32: sparse: got restricted blk_status_t [usertype]
drivers/block/virtio_blk.c:320:24: sparse: sparse: incorrect type in return expression (different base types) @@ expected restricted blk_status_t @@ got int [assigned] err @@
drivers/block/virtio_blk.c:320:24: sparse: expected restricted blk_status_t
drivers/block/virtio_blk.c:320:24: sparse: got int [assigned] err
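
All three warnings come from mixing plain int with the __bitwise type blk_status_t: virtblk_setup_cmd() is declared to return int but hands back BLK_STS_IOERR and BLK_STS_RESOURCE (lines 238 and 246), and virtio_queue_rq() then returns that int where its blk_status_t return type is expected (line 320). A minimal sketch of one way to resolve this, assuming the intent is for virtblk_setup_cmd() to speak blk_status_t end to end (illustration only, untested):

-static int virtblk_setup_cmd(struct virtio_device *vdev, struct request *req,
-		struct virtblk_req *vbr)
+static blk_status_t virtblk_setup_cmd(struct virtio_device *vdev,
+		struct request *req, struct virtblk_req *vbr)
 {
 	...
-	return 0;
+	return BLK_STS_OK;
 }

and in virtio_queue_rq(), keep "int err" for the -errno value from virtblk_add_req() but use a separate blk_status_t for the setup path:

 	int qid = hctx->queue_num;
+	blk_status_t status;
 	int err;
 	...
-	err = virtblk_setup_cmd(vblk->vdev, req, vbr);
-	if (unlikely(err))
-		return err;
+	status = virtblk_setup_cmd(vblk->vdev, req, vbr);
+	if (unlikely(status))
+		return status;
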
vim +238 drivers/block/virtio_blk.c
   203
   204  static int virtblk_setup_cmd(struct virtio_device *vdev, struct request *req,
   205                  struct virtblk_req *vbr)
   206  {
   207          bool unmap = false;
   208          u32 type;
   209
   210          vbr->out_hdr.sector = 0;
   211
   212          switch (req_op(req)) {
   213          case REQ_OP_READ:
   214                  type = VIRTIO_BLK_T_IN;
   215                  vbr->out_hdr.sector = cpu_to_virtio64(vdev,
   216                                                        blk_rq_pos(req));
   217                  break;
   218          case REQ_OP_WRITE:
   219                  type = VIRTIO_BLK_T_OUT;
   220                  vbr->out_hdr.sector = cpu_to_virtio64(vdev,
   221                                                        blk_rq_pos(req));
   222                  break;
   223          case REQ_OP_FLUSH:
   224                  type = VIRTIO_BLK_T_FLUSH;
   225                  break;
   226          case REQ_OP_DISCARD:
   227                  type = VIRTIO_BLK_T_DISCARD;
   228                  break;
   229          case REQ_OP_WRITE_ZEROES:
   230                  type = VIRTIO_BLK_T_WRITE_ZEROES;
   231                  unmap = !(req->cmd_flags & REQ_NOUNMAP);
   232                  break;
   233          case REQ_OP_DRV_IN:
   234                  type = VIRTIO_BLK_T_GET_ID;
   235                  break;
   236          default:
   237                  WARN_ON_ONCE(1);
 > 238                  return BLK_STS_IOERR;
   239          }
   240
   241          vbr->out_hdr.type = cpu_to_virtio32(vdev, type);
   242          vbr->out_hdr.ioprio = cpu_to_virtio32(vdev, req_get_ioprio(req));
   243
   244          if (type == VIRTIO_BLK_T_DISCARD || type == VIRTIO_BLK_T_WRITE_ZEROES) {
   245                  if (virtblk_setup_discard_write_zeroes(req, unmap))
   246                          return BLK_STS_RESOURCE;
   247          }
   248
   249          return 0;
   250  }
   251
   252  static inline void virtblk_request_done(struct request *req)
   253  {
   254          struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
   255
   256          virtblk_unmap_data(req, vbr);
   257          virtblk_cleanup_cmd(req);
   258          blk_mq_end_request(req, virtblk_result(vbr));
   259  }
   260
   261  static void virtblk_done(struct virtqueue *vq)
   262  {
   263          struct virtio_blk *vblk = vq->vdev->priv;
   264          bool req_done = false;
   265          int qid = vq->index;
   266          struct virtblk_req *vbr;
   267          unsigned long flags;
   268          unsigned int len;
   269
   270          spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
   271          do {
   272                  virtqueue_disable_cb(vq);
   273                  while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
   274                          struct request *req = blk_mq_rq_from_pdu(vbr);
   275
   276                          if (likely(!blk_should_fake_timeout(req->q)))
   277                                  blk_mq_complete_request(req);
   278                          req_done = true;
   279                  }
   280                  if (unlikely(virtqueue_is_broken(vq)))
   281                          break;
   282          } while (!virtqueue_enable_cb(vq));
   283
   284          /* In case queue is stopped waiting for more buffers. */
   285          if (req_done)
   286                  blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
   287          spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
   288  }
   289
   290  static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
   291  {
   292          struct virtio_blk *vblk = hctx->queue->queuedata;
   293          struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
   294          bool kick;
   295
   296          spin_lock_irq(&vq->lock);
   297          kick = virtqueue_kick_prepare(vq->vq);
   298          spin_unlock_irq(&vq->lock);
   299
   300          if (kick)
   301                  virtqueue_notify(vq->vq);
   302  }
   303
   304  static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
   305                                      const struct blk_mq_queue_data *bd)
   306  {
   307          struct virtio_blk *vblk = hctx->queue->queuedata;
   308          struct request *req = bd->rq;
   309          struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
   310          unsigned long flags;
   311          unsigned int num;
   312          int qid = hctx->queue_num;
   313          int err;
   314          bool notify = false;
   315
   316          BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
   317
   318          err = virtblk_setup_cmd(vblk->vdev, req, vbr);
   319          if (unlikely(err))
 > 320                  return err;
   321
   322          blk_mq_start_request(req);
   323
   324          num = virtblk_map_data(hctx, req, vbr);
   325          if (unlikely(num < 0)) {
   326                  virtblk_cleanup_cmd(req);
   327                  return BLK_STS_RESOURCE;
   328          }
   329
   330          spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
   331          err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg_table.sgl, num);
   332          if (err) {
   333                  virtqueue_kick(vblk->vqs[qid].vq);
   334                  /* Don't stop the queue if -ENOMEM: we may have failed to
   335                   * bounce the buffer due to global resource outage.
   336                   */
   337                  if (err == -ENOSPC)
   338                          blk_mq_stop_hw_queue(hctx);
   339                  spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
   340                  virtblk_unmap_data(req, vbr);
   341                  virtblk_cleanup_cmd(req);
   342                  switch (err) {
   343                  case -ENOSPC:
   344                          return BLK_STS_DEV_RESOURCE;
   345                  case -ENOMEM:
   346                          return BLK_STS_RESOURCE;
   347                  default:
   348                          return BLK_STS_IOERR;
   349                  }
   350          }
   351
   352          if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
   353                  notify = true;
   354          spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
   355
   356          if (notify)
   357                  virtqueue_notify(vblk->vqs[qid].vq);
   358          return BLK_STS_OK;
   359  }
   360
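
For reference, the mechanism behind "restricted blk_status_t": the type is declared __bitwise (see include/linux/blk_types.h), so sparse rejects implicit conversions between it and ordinary integers unless a __force cast is used. A self-contained toy reproduction of the effect follows; the type and function names are invented for illustration, not kernel code:

#ifdef __CHECKER__                      /* defined when running sparse */
# define __bitwise __attribute__((bitwise))
# define __force   __attribute__((force))
#else                                   /* plain gcc: annotations vanish */
# define __bitwise
# define __force
#endif

typedef unsigned int __bitwise status_t;        /* stands in for blk_status_t */

#define STS_OK  ((__force status_t)0)
#define STS_ERR ((__force status_t)1)

int setup(void)
{
        return STS_ERR; /* sparse: expected int, got restricted status_t */
}

status_t queue(void)
{
        int err = setup();

        return err;     /* sparse: expected restricted status_t, got int */
}

Under gcc the annotations expand to nothing and this builds as usual; under sparse it reproduces both flavors of warning seen above, matching lines 238/246 and line 320 respectively.
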
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org