Hi Marek,

I love your patch! Yet something to improve:

[auto build test ERROR on next-20200618]
[also build test ERROR on v5.8-rc1]
[cannot apply to linuxtv-media/master staging/staging-testing drm-exynos/exynos-drm-next drm-intel/for-linux-next linus/master v5.8-rc1 v5.7 v5.7-rc7]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting a patch, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url:      https://github.com/0day-ci/linux/commits/Marek-Szyprowski/DRM-fix-struct-sg_table-nents-vs-orig_nents-misuse/20200619-184302
base:     ce2cc8efd7a40cbd17841add878cb691d0ce0bba
config:   arm64-randconfig-r036-20200621 (attached as .config)
compiler: clang version 11.0.0 (https://github.com/llvm/llvm-project ef455a55bcf2cfea04a99c361b182ad18b7f03f1)

reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # install the arm64 cross-compiling toolchain for the clang build:
        # apt-get install binutils-aarch64-linux-gnu
        # save the attached .config to the linux build tree, then:
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross ARCH=arm64

If you fix the issue, kindly add the following tag, as appropriate:

Reported-by: kernel test robot <lkp@xxxxxxxxx>

All errors (new ones prefixed by >>):

>> drivers/gpu/host1x/job.c:230:10: error: implicit declaration of function 'iommu_map_sgtable' [-Werror,-Wimplicit-function-declaration]
                           err = iommu_map_sgtable(host->domain,
                                 ^
   drivers/gpu/host1x/job.c:230:10: note: did you mean 'dma_map_sgtable'?
   include/linux/dma-mapping.h:628:19: note: 'dma_map_sgtable' declared here
   static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
                     ^
   1 error generated.
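The missing declaration is the new helper this series relies on. Judging
from the call site in the context below, and from the existing
iommu_map_sg() API it would wrap, the helper presumably reads something
like the following sketch. This is a reconstruction for reference only,
not necessarily the exact definition the series adds to <linux/iommu.h>:

    /*
     * Reconstructed for illustration: map a whole sg_table into an IOMMU
     * domain by delegating to iommu_map_sg(), passing orig_nents (the
     * number of entries as originally allocated, which is the point of
     * this series).  Returns the number of bytes mapped, or 0 on failure.
     */
    static inline size_t iommu_map_sgtable(struct iommu_domain *domain,
                    unsigned long iova, struct sg_table *sgt, int prot)
    {
            return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents,
                                prot);
    }

That would also explain clang's note: the DMA-mapping counterpart,
dma_map_sgtable(), is visible in this tree, while the IOMMU-side helper
is not declared in this configuration.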
vim +/iommu_map_sgtable +230 drivers/gpu/host1x/job.c

   100
   101  static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
   102  {
   103          struct host1x_client *client = job->client;
   104          struct device *dev = client->dev;
   105          struct iommu_domain *domain;
   106          unsigned int i;
   107          int err;
   108
   109          domain = iommu_get_domain_for_dev(dev);
   110          job->num_unpins = 0;
   111
   112          for (i = 0; i < job->num_relocs; i++) {
   113                  struct host1x_reloc *reloc = &job->relocs[i];
   114                  dma_addr_t phys_addr, *phys;
   115                  struct sg_table *sgt;
   116
   117                  reloc->target.bo = host1x_bo_get(reloc->target.bo);
   118                  if (!reloc->target.bo) {
   119                          err = -EINVAL;
   120                          goto unpin;
   121                  }
   122
   123                  /*
   124                   * If the client device is not attached to an IOMMU, the
   125                   * physical address of the buffer object can be used.
   126                   *
   127                   * Similarly, when an IOMMU domain is shared between all
   128                   * host1x clients, the IOVA is already available, so no
   129                   * need to map the buffer object again.
   130                   *
   131                   * XXX Note that this isn't always safe to do because it
   132                   * relies on an assumption that no cache maintenance is
   133                   * needed on the buffer objects.
   134                   */
   135                  if (!domain || client->group)
   136                          phys = &phys_addr;
   137                  else
   138                          phys = NULL;
   139
   140                  sgt = host1x_bo_pin(dev, reloc->target.bo, phys);
   141                  if (IS_ERR(sgt)) {
   142                          err = PTR_ERR(sgt);
   143                          goto unpin;
   144                  }
   145
   146                  if (sgt) {
   147                          unsigned long mask = HOST1X_RELOC_READ |
   148                                               HOST1X_RELOC_WRITE;
   149                          enum dma_data_direction dir;
   150
   151                          switch (reloc->flags & mask) {
   152                          case HOST1X_RELOC_READ:
   153                                  dir = DMA_TO_DEVICE;
   154                                  break;
   155
   156                          case HOST1X_RELOC_WRITE:
   157                                  dir = DMA_FROM_DEVICE;
   158                                  break;
   159
   160                          case HOST1X_RELOC_READ | HOST1X_RELOC_WRITE:
   161                                  dir = DMA_BIDIRECTIONAL;
   162                                  break;
   163
   164                          default:
   165                                  err = -EINVAL;
   166                                  goto unpin;
   167                          }
   168
   169                          err = dma_map_sgtable(dev, sgt, dir, 0);
   170                          if (err)
   171                                  goto unpin;
   172
   173                          job->unpins[job->num_unpins].dev = dev;
   174                          job->unpins[job->num_unpins].dir = dir;
   175                          phys_addr = sg_dma_address(sgt->sgl);
   176                  }
   177
   178                  job->addr_phys[job->num_unpins] = phys_addr;
   179                  job->unpins[job->num_unpins].bo = reloc->target.bo;
   180                  job->unpins[job->num_unpins].sgt = sgt;
   181                  job->num_unpins++;
   182          }
   183
   184          for (i = 0; i < job->num_gathers; i++) {
   185                  struct host1x_job_gather *g = &job->gathers[i];
   186                  size_t gather_size = 0;
   187                  struct scatterlist *sg;
   188                  struct sg_table *sgt;
   189                  dma_addr_t phys_addr;
   190                  unsigned long shift;
   191                  struct iova *alloc;
   192                  dma_addr_t *phys;
   193                  unsigned int j;
   194
   195                  g->bo = host1x_bo_get(g->bo);
   196                  if (!g->bo) {
   197                          err = -EINVAL;
   198                          goto unpin;
   199                  }
   200
   201                  /**
   202                   * If the host1x is not attached to an IOMMU, there is no need
   203                   * to map the buffer object for the host1x, since the physical
   204                   * address can simply be used.
   205                   */
   206                  if (!iommu_get_domain_for_dev(host->dev))
   207                          phys = &phys_addr;
   208                  else
   209                          phys = NULL;
   210
   211                  sgt = host1x_bo_pin(host->dev, g->bo, phys);
   212                  if (IS_ERR(sgt)) {
   213                          err = PTR_ERR(sgt);
   214                          goto unpin;
   215                  }
   216
   217                  if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && host->domain) {
   218                          for_each_sgtable_sg(sgt, sg, j)
   219                                  gather_size += sg->length;
   220                          gather_size = iova_align(&host->iova, gather_size);
   221
   222                          shift = iova_shift(&host->iova);
   223                          alloc = alloc_iova(&host->iova, gather_size >> shift,
   224                                             host->iova_end >> shift, true);
   225                          if (!alloc) {
   226                                  err = -ENOMEM;
   227                                  goto unpin;
   228                          }
   229
 > 230                          err = iommu_map_sgtable(host->domain,
   231                                                  iova_dma_addr(&host->iova, alloc),
   232                                                  sgt, IOMMU_READ);
   233                          if (err == 0) {
   234                                  __free_iova(&host->iova, alloc);
   235                                  err = -EINVAL;
   236                                  goto unpin;
   237                          }
   238
   239                          job->unpins[job->num_unpins].size = gather_size;
   240                          phys_addr = iova_dma_addr(&host->iova, alloc);
   241                  } else if (sgt) {
   242                          err = dma_map_sgtable(host->dev, sgt, DMA_TO_DEVICE, 0);
   243                          if (err)
   244                                  goto unpin;
   245
   246                          job->unpins[job->num_unpins].dir = DMA_TO_DEVICE;
   247                          job->unpins[job->num_unpins].dev = host->dev;
   248                          phys_addr = sg_dma_address(sgt->sgl);
   249                  }
   250
   251                  job->addr_phys[job->num_unpins] = phys_addr;
   252                  job->gather_addr_phys[i] = phys_addr;
   253
   254                  job->unpins[job->num_unpins].bo = g->bo;
   255                  job->unpins[job->num_unpins].sgt = sgt;
   256                  job->num_unpins++;
   257          }
   258
   259          return 0;
   260
   261  unpin:
   262          host1x_job_unpin(job);
   263          return err;
   264  }
   265
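Note: since this is a randconfig build, the likely trigger is a
configuration in which the new helper is compiled out, e.g. if
iommu_map_sgtable() is declared only under CONFIG_IOMMU_API (host1x
selects IOMMU_IOVA but does not appear to require IOMMU_API). One
possible way to keep such configs building, sketched here as an
assumption rather than as how the series actually resolves it, would be
a no-op stub in the !CONFIG_IOMMU_API branch of <linux/iommu.h>:

    #else /* !CONFIG_IOMMU_API */

    /*
     * Hypothetical fallback stub: without the IOMMU API no mapping can
     * be established, so return 0, which callers such as pin_job()
     * above already treat as failure.
     */
    static inline size_t iommu_map_sgtable(struct iommu_domain *domain,
                    unsigned long iova, struct sg_table *sgt, int prot)
    {
            return 0;
    }

    #endif /* CONFIG_IOMMU_API */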
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@xxxxxxxxxxxx

Attachment: .config.gz (application/gzip)