[...] > + > +/** > + * zynqmp_r5_probe - Probes ZynqMP R5 processor device node > + * this is called for each individual R5 core to > + * set up mailbox, Xilinx platform manager unique ID, > + * add to rproc core > + * > + * @pdev: domain platform device for current R5 core > + * @node: pointer of the device node for current R5 core > + * @rpu_mode: mode to configure RPU, split or lockstep > + * > + * Return: 0 for success, negative value for failure. > + */ > +static struct zynqmp_r5_rproc *zynqmp_r5_probe(struct platform_device *pdev, > + struct device_node *node, > + enum rpu_oper_mode rpu_mode) > +{ > + int ret, num_banks; > + struct device *dev = &pdev->dev; > + struct rproc *rproc_ptr; > + struct zynqmp_r5_rproc *z_rproc; > + struct device_node *r5_node; > + > + /* Allocate remoteproc instance */ > + rproc_ptr = devm_rproc_alloc(dev, dev_name(dev), &zynqmp_r5_rproc_ops, > + NULL, sizeof(struct zynqmp_r5_rproc)); > + if (!rproc_ptr) { > + ret = -ENOMEM; > + goto error; > + } > + > + rproc_ptr->auto_boot = false; > + z_rproc = rproc_ptr->priv; > + z_rproc->rproc = rproc_ptr; > + r5_node = z_rproc->rproc->dev.parent->of_node; > + > + /* Set up DMA mask */ > + ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32)); > + if (ret) > + goto error; > + > + /* Get R5 power domain node */ > + ret = of_property_read_u32(node, "power-domain", &z_rproc->pnode_id); > + if (ret) > + goto error; > + > + ret = r5_set_mode(z_rproc, rpu_mode); > + if (ret) > + goto error; > + > + if (of_property_read_bool(node, "mboxes")) { > + ret = zynqmp_r5_setup_mbox(z_rproc, node); > + if (ret) > + goto error; > + } > + > + /* go through TCM banks for r5 node */ > + num_banks = of_count_phandle_with_args(r5_node, BANK_LIST_PROP, NULL); > + if (num_banks <= 0) { > + dev_err(dev, "need to specify TCM banks\n"); > + ret = -EINVAL; > + goto error; > + } > + > + if (num_banks > NUM_SRAMS) { > + dev_err(dev, "max number of srams is %d. 
given: %d \r\n",
> + NUM_SRAMS, num_banks);
> + ret = -EINVAL;
> + goto error;
> + }
> +
> + /* construct collection of srams used by the current R5 core */
> + for (; num_banks; num_banks--) {
> + struct resource rsc;
> + struct device_node *dt_node;
> + resource_size_t size;
> + int i;
> +
> + dt_node = of_parse_phandle(r5_node, BANK_LIST_PROP, i);

Variable @i is not initialised, but it is used here as the index to retrieve a handle to the SRAM banks. Reading an uninitialised automatic variable is undefined behaviour, so this code _should_ have failed frequently — or at least yielded abnormal results often enough to be noticed. Why wasn't that the case?

I will stop here for the moment.

> + if (!dt_node) {
> + ret = -EINVAL;
> + goto error;
> + }
> +
> + ret = of_address_to_resource(dt_node, 0, &rsc);
> + if (ret < 0) {
> + of_node_put(dt_node);
> + goto error;
> + }
> +
> + of_node_put(dt_node);
> + size = resource_size(&rsc);
> +
> + /*
> + * Find corresponding Xilinx platform management ID.
> + * The bank information is used in prepare/unprepare and
> + * parse_fw.
> + */
> + for (i = 0; i < NUM_SRAMS; i++) {
> + if (rsc.start == zynqmp_banks[i].addr) {
> + z_rproc->srams[i].addr = rsc.start;
> + z_rproc->srams[i].size = size;
> + z_rproc->srams[i].id = zynqmp_banks[i].id;
> + break;
> + }
> + }
> +
> + if (i == NUM_SRAMS) {
> + dev_err(dev, "sram %llx is not valid.\n", rsc.start);
> + ret = -EINVAL;
> + goto error;
> + }
> + }
> +
> + /* Add R5 remoteproc */
> + ret = devm_rproc_add(dev, rproc_ptr);
> + if (ret) {
> + zynqmp_r5_cleanup_mbox(z_rproc);
> + goto error;
> + }
> +
> + return z_rproc;
> +error:
> + return ERR_PTR(ret);
> +}
> +
> +/*
> + * zynqmp_r5_remoteproc_probe
> + *
> + * @pdev: domain platform device for R5 cluster
> + *
> + * called when driver is probed, for each R5 core specified in DT,
> + * setup as needed to do remoteproc-related operations
> + *
> + * Return: 0 for success, negative value for failure. 
> + */ > +static int zynqmp_r5_remoteproc_probe(struct platform_device *pdev) > +{ > + int ret, core_count; > + struct device *dev = &pdev->dev; > + struct device_node *nc; > + enum rpu_oper_mode rpu_mode = PM_RPU_MODE_LOCKSTEP; > + struct list_head *cluster; /* list to track each core's rproc */ > + struct zynqmp_r5_rproc *z_rproc; > + struct platform_device *child_pdev; > + struct list_head *pos; > + > + ret = of_property_read_u32(dev->of_node, "xlnx,cluster-mode", &rpu_mode); > + if (ret < 0 || (rpu_mode != PM_RPU_MODE_LOCKSTEP && > + rpu_mode != PM_RPU_MODE_SPLIT)) { > + dev_err(dev, "invalid cluster mode: ret %d mode %x\n", > + ret, rpu_mode); > + return ret; > + } > + > + dev_dbg(dev, "RPU configuration: %s\n", > + rpu_mode == PM_RPU_MODE_LOCKSTEP ? "lockstep" : "split"); > + > + /* > + * if 2 RPUs provided but one is lockstep, then we have an > + * invalid configuration. > + */ > + > + core_count = of_get_available_child_count(dev->of_node); > + if ((rpu_mode == PM_RPU_MODE_LOCKSTEP && core_count != 1) || > + core_count > MAX_RPROCS) > + return -EINVAL; > + > + cluster = devm_kzalloc(dev, sizeof(*cluster), GFP_KERNEL); > + if (!cluster) > + return -ENOMEM; > + INIT_LIST_HEAD(cluster); > + > + ret = devm_of_platform_populate(dev); > + if (ret) { > + dev_err(dev, "devm_of_platform_populate failed, ret = %d\n", ret); > + return ret; > + } > + > + /* probe each individual r5 core's remoteproc-related info */ > + for_each_available_child_of_node(dev->of_node, nc) { > + child_pdev = of_find_device_by_node(nc); > + if (!child_pdev) { > + dev_err(dev, "could not get R5 core platform device\n"); > + ret = -ENODEV; > + goto out; > + } > + > + z_rproc = zynqmp_r5_probe(child_pdev, nc, rpu_mode); > + dev_dbg(dev, "%s to probe rpu %pOF\n", > + ret ? 
"Failed" : "Able", nc); > + if (IS_ERR(z_rproc)) { > + ret = PTR_ERR(z_rproc); > + goto out; > + } > + list_add_tail(&z_rproc->elem, cluster); > + } > + /* wire in so each core can be cleaned up at driver remove */ > + platform_set_drvdata(pdev, cluster); > + return 0; > +out: > + list_for_each(pos, cluster) { > + z_rproc = list_entry(pos, struct zynqmp_r5_rproc, elem); > + zynqmp_r5_cleanup_mbox(z_rproc); > + } > + return ret; > +} > + > +/* > + * zynqmp_r5_remoteproc_remove > + * > + * @pdev: domain platform device for R5 cluster > + * > + * When the driver is unloaded, clean up the mailboxes for each > + * remoteproc that was initially probed. > + */ > +static int zynqmp_r5_remoteproc_remove(struct platform_device *pdev) > +{ > + struct list_head *pos, *temp, *cluster = (struct list_head *) > + platform_get_drvdata(pdev); > + struct zynqmp_r5_rproc *z_rproc = NULL; > + > + list_for_each_safe(pos, temp, cluster) { > + z_rproc = list_entry(pos, struct zynqmp_r5_rproc, elem); > + zynqmp_r5_cleanup_mbox(z_rproc); > + } > + return 0; > +} > + > +/* Match table for OF platform binding */ > +static const struct of_device_id zynqmp_r5_remoteproc_match[] = { > + { .compatible = "xlnx,zynqmp-r5-remoteproc", }, > + { /* end of list */ }, > +}; > +MODULE_DEVICE_TABLE(of, zynqmp_r5_remoteproc_match); > + > +static struct platform_driver zynqmp_r5_remoteproc_driver = { > + .probe = zynqmp_r5_remoteproc_probe, > + .remove = zynqmp_r5_remoteproc_remove, > + .driver = { > + .name = "zynqmp_r5_remoteproc", > + .of_match_table = zynqmp_r5_remoteproc_match, > + }, > +}; > +module_platform_driver(zynqmp_r5_remoteproc_driver); > + > +MODULE_AUTHOR("Ben Levinsky <ben.levinsky@xxxxxxxxxx>"); > +MODULE_LICENSE("GPL v2"); > -- > 2.17.1 >