+ if (!mem)
+ return ERR_PTR(-ENOMEM);
+
+ if (inatomic) {
+ int fault;
+
+ /* The caller may hold bo reservations, so the copy must not
+ * fault: keep pagefaults disabled around it. */
+ pagefault_disable();
+ fault = !access_ok(VERIFY_READ, userptr, size) ||
+ __copy_from_user_inatomic(mem, userptr, size);
+ pagefault_enable();
+ if (fault) {
+ u_free(mem);
+ return ERR_PTR(-EFAULT);
+ }
+ } else if (copy_from_user(mem, userptr, size)) {
+ u_free(mem);
+ return ERR_PTR(-EFAULT);
+ }
+
+ return mem;
+}
+
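+/* pushbuf_validate now applies relocs itself, hence the forward declaration. */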
+static int
+nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
+ struct drm_nouveau_gem_pushbuf *req,
+ struct drm_nouveau_gem_pushbuf_bo *bo,
+ struct drm_nouveau_gem_pushbuf_reloc *reloc);
+
static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
struct drm_file *file_priv,
struct drm_nouveau_gem_pushbuf_bo *pbbo,
+ struct drm_nouveau_gem_pushbuf *req,
uint64_t user_buffers, int nr_buffers,
- struct validate_op *op, int *apply_relocs)
+ struct validate_op *op, int *do_reloc)
{
struct nouveau_cli *cli = nouveau_cli(file_priv);
int ret, relocs = 0;
+ struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
+
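+ /*
+ * Relocs are first copied with pagefaults disabled while the
+ * buffers stay reserved. If that copy faults, the reservations
+ * are dropped, the relocs are copied again with faults allowed,
+ * and validation restarts from the top.
+ */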
+restart:
+ relocs = 0;
INIT_LIST_HEAD(&op->vram_list);
INIT_LIST_HEAD(&op->gart_list);
INIT_LIST_HEAD(&op->both_list);
if (nr_buffers == 0)
return 0;

ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
if (unlikely(ret)) {
if (ret != -ERESTARTSYS)
NV_ERROR(cli, "validate_init\n");
- return ret;
+ goto err;
}
ret = validate_list(chan, cli, &op->vram_list, pbbo, user_buffers);
if (unlikely(ret < 0)) {
if (ret != -ERESTARTSYS)
NV_ERROR(cli, "validate vram_list\n");
- validate_fini(op, NULL);
- return ret;
+ goto err_fini;
}
relocs += ret;
@@ -537,8 +568,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
if (unlikely(ret < 0)) {
if (ret != -ERESTARTSYS)
NV_ERROR(cli, "validate gart_list\n");
- validate_fini(op, NULL);
- return ret;
+ goto err_fini;
}
relocs += ret;
@@ -546,58 +576,93 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
if (unlikely(ret < 0)) {
if (ret != -ERESTARTSYS)
NV_ERROR(cli, "validate both_list\n");
- validate_fini(op, NULL);
- return ret;
+ goto err_fini;
}
relocs += ret;
+ if (relocs) {
+ if (!reloc) {
+ /* Fast path: no-fault copy while the reservations are held. */
+ reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc), 1);
+ }
+ if (IS_ERR(reloc)) {
+ validate_fini(op, NULL);
+
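+ /* Reservations are dropped now, so a faulting copy is safe. */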
+ if (PTR_ERR(reloc) == -EFAULT)
+ reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc), 0);
+
+ if (IS_ERR(reloc))
+ return PTR_ERR(reloc);
+ goto restart;
+ }
+
+ ret = nouveau_gem_pushbuf_reloc_apply(cli, req, pbbo, reloc);
+ if (ret) {
+ NV_ERROR(cli, "reloc apply: %d\n", ret);
+ goto err_fini;
+ }
+ u_free(reloc);
+ *do_reloc = 1;
+ }
- *apply_relocs = relocs;
return 0;
-}
-static inline void
-u_free(void *addr)
-{
- if (!is_vmalloc_addr(addr))
- kfree(addr);
- else
- vfree(addr);
+err_fini:
+ validate_fini(op, NULL);
+err:
+ if (reloc)
+ u_free(reloc);
+ return ret;
}
-static inline void *
-u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
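+/*
+ * Copy any presumed offsets that validation updated back to userspace,
+ * so the next pushbuf submission can take the fast path again.
+ */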
+static int
+nouveau_gem_pushbuf_reloc_copy_to_user(struct drm_nouveau_gem_pushbuf *req,
+ struct drm_nouveau_gem_pushbuf_bo *bo)
{
- void *mem;
- void __user *userptr = (void __force __user *)(uintptr_t)user;
+ struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
+ (void __force __user *)(uintptr_t)req->buffers;
+ unsigned i;
- size *= nmemb;
+ for (i = 0; i < req->nr_buffers; ++i) {
+ struct drm_nouveau_gem_pushbuf_bo *b = &bo[i];
- mem = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
- if (!mem)
- mem = vmalloc(size);
- if (!mem)
- return ERR_PTR(-ENOMEM);
-
- if (DRM_COPY_FROM_USER(mem, userptr, size)) {
- u_free(mem);
- return ERR_PTR(-EFAULT);
+ if (!b->presumed.valid &&
+ copy_to_user(&upbbo[i].presumed, &b->presumed, sizeof(b->presumed)))
+ return -EFAULT;
}
-
- return mem;
+ return 0;
}
static int
nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
struct drm_nouveau_gem_pushbuf *req,
- struct drm_nouveau_gem_pushbuf_bo *bo)
+ struct drm_nouveau_gem_pushbuf_bo *bo,
+ struct drm_nouveau_gem_pushbuf_reloc *reloc)
{
- struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
int ret = 0;
unsigned i;
- reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
- if (IS_ERR(reloc))
- return PTR_ERR(reloc);
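+ /* Refresh the presumed offsets; anything that moved is marked
+ * invalid here so it gets copied back to userspace afterwards. */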
+ for (i = 0; i < req->nr_buffers; ++i) {
+ struct drm_nouveau_gem_pushbuf_bo *b = &bo[i];
+ struct nouveau_bo *nvbo = (void *)(unsigned long)
+ bo[i].user_priv;
+
+ if (nvbo->bo.offset == b->presumed.offset &&
+ ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
+ b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
+ (nvbo->bo.mem.mem_type == TTM_PL_TT &&
+ b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART))) {
+ b->presumed.valid = 1;
+ continue;
+ }
+
+ if (nvbo->bo.mem.mem_type == TTM_PL_TT)
+ b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
+ else
+ b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
+ b->presumed.offset = nvbo->bo.offset;
+ b->presumed.valid = 0;
+ }
for (i = 0; i < req->nr_relocs; i++) {
struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
@@ -664,8 +729,6 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
}
-
- u_free(reloc);
return ret;
}
@@ -721,11 +784,11 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
return nouveau_abi16_put(abi16, -EINVAL);
}
- push = u_memcpya(req->push, req->nr_push, sizeof(*push));
+ push = u_memcpya(req->push, req->nr_push, sizeof(*push), 0);
if (IS_ERR(push))
return nouveau_abi16_put(abi16, PTR_ERR(push));
- bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
+ bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo), 0);
if (IS_ERR(bo)) {
u_free(push);
return nouveau_abi16_put(abi16, PTR_ERR(bo));
@@ -741,7 +804,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
}
/* Validate buffer list */
- ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
+ ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req, req->buffers,
req->nr_buffers, &op, &do_reloc);
if (ret) {
if (ret != -ERESTARTSYS)
@@ -749,15 +812,6 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
goto out_prevalid;
}
- /* Apply any relocations that are required */
- if (do_reloc) {
- ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo);
- if (ret) {
- NV_ERROR(cli, "reloc apply: %d\n", ret);
- goto out;
- }
- }
-
if (chan->dma.ib_max) {
ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
if (ret) {
@@ -837,6 +891,17 @@ out:
validate_fini(&op, fence);
nouveau_fence_unref(&fence);
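+ /*
+ * The copy back runs after validate_fini(), so a fault here can no
+ * longer invert mmap_sem against the bo reservations.
+ */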
+ if (do_reloc && !ret) {
+ ret = nouveau_gem_pushbuf_reloc_copy_to_user(req, bo);
+ if (ret) {
+ NV_ERROR(cli, "error copying presumed back to userspace: %d\n", ret);
+ /*
+ * XXX: we return -EFAULT even though the command
+ * submission itself has already completed.
+ */
+ }
+ }
+
out_prevalid:
u_free(bo);
u_free(push);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 1006c15..829e911 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -64,12 +64,9 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
* for reserve, and if it fails, retry the fault after scheduling.
*/
- ret = ttm_bo_reserve(bo, true, true, false, 0);
- if (unlikely(ret != 0)) {
- if (ret == -EBUSY)
- set_need_resched();
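+ /*
+ * A blocking reserve is safe now that callers no longer fault on
+ * user memory while holding bo reservations, so the trylock and
+ * set_need_resched() workaround can go.
+ */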
+ ret = ttm_bo_reserve(bo, true, false, false, 0);
+ if (unlikely(ret != 0))
return VM_FAULT_NOPAGE;
- }