Hello Keith Busch,

Commit c58c0074c54c ("block/bio: remove duplicate append pages code")
from Jun 10, 2022 (linux-next), leads to the following Smatch static
checker warning:

	block/bio.c:1307 __bio_iov_iter_get_pages()
	error: we previously assumed 'bio->bi_bdev' could be null (see line 1291)

block/bio.c
    1253 static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
    1254 {
    1255 	iov_iter_extraction_t extraction_flags = 0;
    1256 	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
    1257 	unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
    1258 	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
    1259 	struct page **pages = (struct page **)bv;
    1260 	ssize_t size, left;
    1261 	unsigned len, i = 0;
    1262 	size_t offset;
    1263 	int ret = 0;
    1264 
    1265 	/*
    1266 	 * Move page array up in the allocated memory for the bio vecs as far as
    1267 	 * possible so that we can start filling biovecs from the beginning
    1268 	 * without overwriting the temporary page array.
    1269 	 */
    1270 	BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
    1271 	pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);
    1272 
    1273 	if (bio->bi_bdev && blk_queue_pci_p2pdma(bio->bi_bdev->bd_disk->queue))
                     ^^^^^^^^^^^^
Assumes bio->bi_bdev can be NULL

    1274 		extraction_flags |= ITER_ALLOW_P2PDMA;
    1275 
    1276 	/*
    1277 	 * Each segment in the iov is required to be a block size multiple.
    1278 	 * However, we may not be able to get the entire segment if it spans
    1279 	 * more pages than bi_max_vecs allows, so we have to ALIGN_DOWN the
    1280 	 * result to ensure the bio's total size is correct. The remainder of
    1281 	 * the iov data will be picked up in the next bio iteration.
    1282 	 */
    1283 	size = iov_iter_extract_pages(iter, &pages,
    1284 				      UINT_MAX - bio->bi_iter.bi_size,
    1285 				      nr_pages, extraction_flags, &offset);
    1286 	if (unlikely(size <= 0))
    1287 		return size ? size : -EFAULT;
    1288 
    1289 	nr_pages = DIV_ROUND_UP(offset + size, PAGE_SIZE);
    1290 
    1291 	if (bio->bi_bdev) {
                 ^^^^^^^^^^^^
More checks

    1292 		size_t trim = size & (bdev_logical_block_size(bio->bi_bdev) - 1);
    1293 		iov_iter_revert(iter, trim);
    1294 		size -= trim;
    1295 	}
    1296 
    1297 	if (unlikely(!size)) {
    1298 		ret = -EFAULT;
    1299 		goto out;
    1300 	}
    1301 
    1302 	for (left = size, i = 0; left > 0; left -= len, i++) {
    1303 		struct page *page = pages[i];
    1304 
    1305 		len = min_t(size_t, PAGE_SIZE - offset, left);
    1306 		if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
--> 1307 			ret = bio_iov_add_zone_append_page(bio, page, len,
                              ^^^
bio->bi_bdev is dereferenced inside the function

    1308 					offset);
    1309 			if (ret)
    1310 				break;
    1311 		} else
    1312 			bio_iov_add_page(bio, page, len, offset);
    1313 
    1314 		offset = 0;
    1315 	}
    1316 
    1317 	iov_iter_revert(iter, left);
    1318 out:
    1319 	while (i < nr_pages)
    1320 		bio_release_page(bio, pages[i++]);
    1321 
    1322 	return ret;
    1323 }

regards,
dan carpenter