On Tue, May 14, 2024 at 11:16:19AM GMT, Tomasz Jeznach wrote: ... > +static int riscv_iommu_bond_link(struct riscv_iommu_domain *domain, > + struct device *dev) > +{ > + struct riscv_iommu_device *iommu = dev_to_iommu(dev); > + struct riscv_iommu_bond *bond; > + struct list_head *bonds; > + > + bond = kzalloc(sizeof(*bond), GFP_KERNEL); > + if (!bond) > + return -ENOMEM; > + bond->dev = dev; > + > + /* > + * List of devices attached to the domain is arranged based on > + * managed IOMMU device. > + */ > + > + spin_lock(&domain->lock); > + list_for_each_rcu(bonds, &domain->bonds) > + if (dev_to_iommu(list_entry(bonds, struct riscv_iommu_bond, list)->dev) == iommu) > + break; We should wrap this list_for_each_rcu() in rcu_read_lock() and rcu_read_unlock(): the RCU list iterators expect to run inside an RCU read-side critical section, and even though domain->lock is held here on the update side, lockdep's RCU checking will complain without the explicit read-lock markers. Thanks, drew > + list_add_rcu(&bond->list, bonds); > + spin_unlock(&domain->lock); > + > + /* Synchronize with riscv_iommu_iotlb_inval() sequence. See comment below. */ > + smp_mb(); > + > + return 0; > +}