On 21.09.2017 at 18:30, Samuel Li wrote:
> v2: drop hdp invalidate/flush.
> v3: honor pgoff during prime mmap. Add a barrier after cpu access.
> v4: drop begin/end_cpu_access() for now, revisit later.
>
> Signed-off-by: Samuel Li <Samuel.Li at amd.com>

Reviewed-by: Christian König <christian.koenig at amd.com>

> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu.h       |  1 +
>  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c   |  1 +
>  drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | 34 +++++++++++++++++++++++++++++++
>  3 files changed, 36 insertions(+)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> index d2aaad7..edcfa13 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> @@ -400,6 +400,7 @@ void amdgpu_gem_prime_unpin(struct drm_gem_object *obj);
>  struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *);
>  void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
>  void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
> +int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
>  int amdgpu_gem_debugfs_init(struct amdgpu_device *adev);
>
>  /* sub-allocation manager, it has to be protected by another lock.
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
> index 2cdf844..19c0499 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
> @@ -843,6 +843,7 @@ static struct drm_driver kms_driver = {
> 	.gem_prime_import_sg_table = amdgpu_gem_prime_import_sg_table,
> 	.gem_prime_vmap = amdgpu_gem_prime_vmap,
> 	.gem_prime_vunmap = amdgpu_gem_prime_vunmap,
> +	.gem_prime_mmap = amdgpu_gem_prime_mmap,
>
> 	.name = DRIVER_NAME,
> 	.desc = DRIVER_DESC,
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
> index 5b3f928..83241ca 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
> @@ -57,6 +57,40 @@ void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
> 	ttm_bo_kunmap(&bo->dma_buf_vmap);
>  }
>
> +int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
> +{
> +	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
> +	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
> +	unsigned asize = amdgpu_bo_size(bo);
> +	int ret;
> +
> +	if (!vma->vm_file)
> +		return -ENODEV;
> +
> +	if (adev == NULL)
> +		return -ENODEV;
> +
> +	/* Check for valid size. */
> +	if (asize < vma->vm_end - vma->vm_start)
> +		return -EINVAL;
> +
> +	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
> +	    (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
> +		return -EPERM;
> +	}
> +	vma->vm_pgoff += amdgpu_bo_mmap_offset(bo) >> PAGE_SHIFT;
> +
> +	/* prime mmap does not need to check access, so allow here */
> +	ret = drm_vma_node_allow(&obj->vma_node, vma->vm_file->private_data);
> +	if (ret)
> +		return ret;
> +
> +	ret = ttm_bo_mmap(vma->vm_file, vma, &adev->mman.bdev);
> +	drm_vma_node_revoke(&obj->vma_node, vma->vm_file->private_data);
> +
> +	return ret;
> +}
> +
>  struct drm_gem_object *
>  amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
>  				 struct dma_buf_attachment *attach,
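
For anyone following along, here is a rough userspace sketch of the path this
patch opens up. It is illustrative only and not part of the patch:
map_exported_bo, drm_fd, handle and size are made-up names, and error handling
is trimmed to the essentials.

#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>
#include <xf86drm.h>

/* Hypothetical helper: export a GEM handle as a dma-buf fd and map it. */
static void *map_exported_bo(int drm_fd, uint32_t handle, size_t size)
{
	int dmabuf_fd;
	void *ptr;

	/* PRIME export; DRM_RDWR is required for a writable mapping. */
	if (drmPrimeHandleToFD(drm_fd, handle, DRM_CLOEXEC | DRM_RDWR,
			       &dmabuf_fd))
		return NULL;

	/* mmap() on the dma-buf fd now ends up in amdgpu_gem_prime_mmap(). */
	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   dmabuf_fd, 0);
	return ptr == MAP_FAILED ? NULL : ptr;
}

If I read the PRIME helpers right, the mmap() above used to fail for amdgpu
buffers because .gem_prime_mmap was not set, and with the v3 change the offset
passed to mmap() is interpreted relative to the start of the BO thanks to the
vm_pgoff adjustment.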