[ Ancient code, but still maybe the bug is easy to fix? -dan ]

Hello Guennadi Liakhovetski,

The patch b45b262cefd5: "dmaengine: add a driver for AMBA AXI NBPF DMAC
IP cores" from Jul 19, 2014, leads to the following Smatch static
checker warning:

	drivers/dma/nbpfaxi.c:1358 nbpf_probe()
	warn: potentially one past the end of array 'nbpf->chan[i]'

drivers/dma/nbpfaxi.c
    1291 static int nbpf_probe(struct platform_device *pdev)
    1292 {
    1293 	struct device *dev = &pdev->dev;
    1294 	struct device_node *np = dev->of_node;
    1295 	struct nbpf_device *nbpf;
    1296 	struct dma_device *dma_dev;
    1297 	struct resource *iomem;
    1298 	const struct nbpf_config *cfg;
    1299 	int num_channels;
    1300 	int ret, irq, eirq, i;
    1301 	int irqbuf[9] /* maximum 8 channels + error IRQ */;
    1302 	unsigned int irqs = 0;
    1303 
    1304 	BUILD_BUG_ON(sizeof(struct nbpf_desc_page) > PAGE_SIZE);
    1305 
    1306 	/* DT only */
    1307 	if (!np)
    1308 		return -ENODEV;
    1309 
    1310 	cfg = of_device_get_match_data(dev);
    1311 	num_channels = cfg->num_channels;
    1312 
    1313 	nbpf = devm_kzalloc(dev, struct_size(nbpf, chan, num_channels),
                                         ^^^^^^^^^^^^^^^^^^^^^^^^
nbpf->chan[] has num_channels elements.

    1314 			    GFP_KERNEL);
    1315 	if (!nbpf)
    1316 		return -ENOMEM;
    1317 
    1318 	dma_dev = &nbpf->dma_dev;
    1319 	dma_dev->dev = dev;
    1320 
    1321 	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    1322 	nbpf->base = devm_ioremap_resource(dev, iomem);
    1323 	if (IS_ERR(nbpf->base))
    1324 		return PTR_ERR(nbpf->base);
    1325 
    1326 	nbpf->clk = devm_clk_get(dev, NULL);
    1327 	if (IS_ERR(nbpf->clk))
    1328 		return PTR_ERR(nbpf->clk);
    1329 
    1330 	of_property_read_u32(np, "max-burst-mem-read",
    1331 			     &nbpf->max_burst_mem_read);
    1332 	of_property_read_u32(np, "max-burst-mem-write",
    1333 			     &nbpf->max_burst_mem_write);
    1334 
    1335 	nbpf->config = cfg;
    1336 
    1337 	for (i = 0; irqs < ARRAY_SIZE(irqbuf); i++) {
    1338 		irq = platform_get_irq_optional(pdev, i);
    1339 		if (irq < 0 && irq != -ENXIO)
    1340 			return irq;
    1341 		if (irq > 0)
    1342 			irqbuf[irqs++] = irq;
    1343 	}
    1344 
    1345 	/*
    1346 	 * 3 IRQ resource schemes are supported:
    1347 	 * 1. 1 shared IRQ for error and all channels
    1348 	 * 2. 2 IRQs: one for error and one shared for all channels
    1349 	 * 3. 1 IRQ for error and an own IRQ for each channel
    1350 	 */
    1351 	if (irqs != 1 && irqs != 2 && irqs != num_channels + 1)
                                              ^^^^^^^^^^^^^^^^
    1352 		return -ENXIO;
    1353 
    1354 	if (irqs == 1) {
    1355 		eirq = irqbuf[0];
    1356 
    1357 		for (i = 0; i <= num_channels; i++)
                              ^^
--> 1358 			nbpf->chan[i].irq = irqbuf[0];
                        ^^^^^^^^^^^^
Off by one because of the <=.  There are a bunch of weird
num_channels + 1 limits so it's not clear what's happening in this
function.
    1359 	} else {
    1360 		eirq = platform_get_irq_byname(pdev, "error");
    1361 		if (eirq < 0)
    1362 			return eirq;
    1363 
    1364 		if (irqs == num_channels + 1) {
                            ^^^^^^^^^^^^^^^^
    1365 			struct nbpf_channel *chan;
    1366 
    1367 			for (i = 0, chan = nbpf->chan; i <= num_channels;
                                                       ^^^^^^^^^^^^^^^^^^
    1368 			     i++, chan++) {
    1369 				/* Skip the error IRQ */
    1370 				if (irqbuf[i] == eirq)
    1371 					i++;
    1372 				chan->irq = irqbuf[i];
    1373 			}
    1374 
    1375 			if (chan != nbpf->chan + num_channels)
    1376 				return -EINVAL;
    1377 		} else {
    1378 			/* 2 IRQs and more than one channel */
    1379 			if (irqbuf[0] == eirq)
    1380 				irq = irqbuf[1];
    1381 			else
    1382 				irq = irqbuf[0];
    1383 
    1384 			for (i = 0; i <= num_channels; i++)
                                      ^^^^^^^^^^^^^^^^^
    1385 				nbpf->chan[i].irq = irq;
    1386 		}
    1387 	}
    1388 
    1389 	ret = devm_request_irq(dev, eirq, nbpf_err_irq,
    1390 			       IRQF_SHARED, "dma error", nbpf);
    1391 	if (ret < 0)
    1392 		return ret;
    1393 	nbpf->eirq = eirq;
    1394 
    1395 	INIT_LIST_HEAD(&dma_dev->channels);
    1396 
    1397 	/* Create DMA Channel */
    1398 	for (i = 0; i < num_channels; i++) {
    1399 		ret = nbpf_chan_probe(nbpf, i);
    1400 		if (ret < 0)
    1401 			return ret;
    1402 	}
    1403 
    1404 	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
    1405 	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);

regards,
dan carpenter
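
For what it's worth, one possible shape of a fix, assuming chan[] is
only ever supposed to hold num_channels entries.  This is an untested
sketch, not a verified patch; the restructured skip loop in the middle
branch is my guess at the intent:

	if (irqs == 1) {
		eirq = irqbuf[0];

		/* was: i <= num_channels */
		for (i = 0; i < num_channels; i++)
			nbpf->chan[i].irq = irqbuf[0];
	} else {
		eirq = platform_get_irq_byname(pdev, "error");
		if (eirq < 0)
			return eirq;

		if (irqs == num_channels + 1) {
			struct nbpf_channel *chan;

			/*
			 * Walk all num_channels + 1 IRQs, skip the error
			 * IRQ, and only advance chan for channel IRQs so
			 * chan can never move past the end of the array,
			 * even when the error IRQ is the last entry.
			 */
			for (i = 0, chan = nbpf->chan; i < irqs; i++) {
				if (irqbuf[i] == eirq)
					continue;
				if (chan == nbpf->chan + num_channels)
					return -EINVAL;
				chan->irq = irqbuf[i];
				chan++;
			}

			/* The error IRQ must appear exactly once */
			if (chan != nbpf->chan + num_channels)
				return -EINVAL;
		} else {
			/* 2 IRQs and more than one channel */
			if (irqbuf[0] == eirq)
				irq = irqbuf[1];
			else
				irq = irqbuf[0];

			/* was: i <= num_channels */
			for (i = 0; i < num_channels; i++)
				nbpf->chan[i].irq = irq;
		}
	}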