This allocates a new nvif_mmu in nouveau_drm, and uses it for TTM
backend memory allocations instead of nouveau_drm.master.mmu, which
will be removed in a later commit.

Signed-off-by: Ben Skeggs <bskeggs@xxxxxxxxxx>
---
 drivers/gpu/drm/nouveau/nouveau_drm.c | 36 ++++++++++++++++-----------
 drivers/gpu/drm/nouveau/nouveau_drv.h |  1 +
 drivers/gpu/drm/nouveau/nouveau_mem.c | 12 ++++-----
 3 files changed, 29 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 704474e16b1d..652d38a71211 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -227,13 +227,6 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
 		{}
 	};
 	static const struct nvif_mclass
-	mmus[] = {
-		{ NVIF_CLASS_MMU_GF100, -1 },
-		{ NVIF_CLASS_MMU_NV50 , -1 },
-		{ NVIF_CLASS_MMU_NV04 , -1 },
-		{}
-	};
-	static const struct nvif_mclass
 	vmms[] = {
 		{ NVIF_CLASS_VMM_GP100, -1 },
 		{ NVIF_CLASS_VMM_GM200, -1 },
@@ -270,13 +263,7 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
 
 	cli->device.object.map.ptr = drm->device.object.map.ptr;
 
-	ret = nvif_mclass(&cli->device.object, mmus);
-	if (ret < 0) {
-		NV_PRINTK(err, cli, "No supported MMU class\n");
-		goto done;
-	}
-
-	ret = nvif_mmu_ctor(&cli->device.object, "drmMmu", mmus[ret].oclass,
+	ret = nvif_mmu_ctor(&cli->device.object, "drmMmu", drm->mmu.object.oclass,
 			    &cli->mmu);
 	if (ret) {
 		NV_PRINTK(err, cli, "MMU allocation failed: %d\n", ret);
@@ -572,6 +559,13 @@ nouveau_parent = {
 static int
 nouveau_drm_device_init(struct drm_device *dev, struct nvkm_device *nvkm)
 {
+	static const struct nvif_mclass
+	mmus[] = {
+		{ NVIF_CLASS_MMU_GF100, -1 },
+		{ NVIF_CLASS_MMU_NV50 , -1 },
+		{ NVIF_CLASS_MMU_NV04 , -1 },
+		{}
+	};
 	struct nouveau_drm *drm;
 	int ret;
 
@@ -601,6 +595,18 @@ nouveau_drm_device_init(struct drm_device *dev, struct nvkm_device *nvkm)
 		goto fail_nvif;
 	}
 
+	ret = nvif_mclass(&drm->device.object, mmus);
+	if (ret < 0) {
+		NV_ERROR(drm, "No supported MMU class\n");
+		goto fail_nvif;
+	}
+
+	ret = nvif_mmu_ctor(&drm->device.object, "drmMmu", mmus[ret].oclass, &drm->mmu);
+	if (ret) {
+		NV_ERROR(drm, "MMU allocation failed: %d\n", ret);
+		goto fail_nvif;
+	}
+
 	drm->sched_wq = alloc_workqueue("nouveau_sched_wq_shared", 0,
 					WQ_MAX_ACTIVE);
 	if (!drm->sched_wq) {
@@ -680,6 +686,7 @@ nouveau_drm_device_init(struct drm_device *dev, struct nvkm_device *nvkm)
 fail_wq:
 	destroy_workqueue(drm->sched_wq);
 fail_nvif:
+	nvif_mmu_dtor(&drm->mmu);
 	nvif_device_dtor(&drm->device);
 	nvif_client_dtor(&drm->master.base);
 fail_alloc:
@@ -736,6 +743,7 @@ nouveau_drm_device_fini(struct drm_device *dev)
 	nouveau_cli_fini(&drm->client);
 	nouveau_cli_fini(&drm->master);
 	destroy_workqueue(drm->sched_wq);
+	nvif_mmu_dtor(&drm->mmu);
 	nvif_device_dtor(&drm->device);
 	nvif_client_dtor(&drm->master.base);
 	nvif_parent_dtor(&drm->parent);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 27a4a365669a..0b1cf2f2f9bc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -204,6 +204,7 @@ struct nouveau_drm {
 	struct nvkm_device *nvkm;
 	struct nvif_parent parent;
 	struct nvif_device device;
+	struct nvif_mmu mmu;
 
 	struct nouveau_cli master;
 	struct nouveau_cli client;
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 25f31d5169e5..67f93cf753ba 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -91,7 +91,7 @@ nouveau_mem_host(struct ttm_resource *reg, struct ttm_tt *tt)
 	struct nouveau_mem *mem = nouveau_mem(reg);
 	struct nouveau_cli *cli = mem->cli;
 	struct nouveau_drm *drm = cli->drm;
-	struct nvif_mmu *mmu = &cli->mmu;
+	struct nvif_mmu *mmu = &drm->mmu;
 	struct nvif_mem_ram_v0 args = {};
 	u8 type;
 	int ret;
@@ -115,7 +115,7 @@ nouveau_mem_host(struct ttm_resource *reg, struct ttm_tt *tt)
 		args.dma = tt->dma_address;
 
 	mutex_lock(&drm->master.lock);
-	ret = nvif_mem_ctor_type(mmu, "ttmHostMem", cli->mem->oclass, type, PAGE_SHIFT,
+	ret = nvif_mem_ctor_type(mmu, "ttmHostMem", mmu->mem, type, PAGE_SHIFT,
 				 reg->size, &args, sizeof(args), &mem->mem);
 	mutex_unlock(&drm->master.lock);
 	return ret;
@@ -128,14 +128,14 @@ nouveau_mem_vram(struct ttm_resource *reg, bool contig, u8 page)
 	struct nouveau_mem *mem = nouveau_mem(reg);
 	struct nouveau_cli *cli = mem->cli;
 	struct nouveau_drm *drm = cli->drm;
-	struct nvif_mmu *mmu = &cli->mmu;
+	struct nvif_mmu *mmu = &drm->mmu;
 	u64 size = ALIGN(reg->size, 1 << page);
 	int ret;
 
 	mutex_lock(&drm->master.lock);
-	switch (cli->mem->oclass) {
+	switch (mmu->mem) {
 	case NVIF_CLASS_MEM_GF100:
-		ret = nvif_mem_ctor_type(mmu, "ttmVram", cli->mem->oclass,
+		ret = nvif_mem_ctor_type(mmu, "ttmVram", mmu->mem,
 					 drm->ttm.type_vram, page, size,
 					 &(struct gf100_mem_v0) {
 						.contig = contig,
@@ -143,7 +143,7 @@ nouveau_mem_vram(struct ttm_resource *reg, bool contig, u8 page)
 					 &mem->mem);
 		break;
 	case NVIF_CLASS_MEM_NV50:
-		ret = nvif_mem_ctor_type(mmu, "ttmVram", cli->mem->oclass,
+		ret = nvif_mem_ctor_type(mmu, "ttmVram", mmu->mem,
 					 drm->ttm.type_vram, page, size,
 					 &(struct nv50_mem_v0) {
 						.bankswz = mmu->kind[mem->kind] == 2,
-- 
2.44.0
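For context, the nvif_mclass() call added to nouveau_drm_device_init() above walks a
priority-ordered table of class IDs and returns the index of the first class the device
supports; that class is then handed to nvif_mmu_ctor() to build the new drm->mmu. The
stand-alone sketch below illustrates only that first-match probe pattern. The class
values, device_supports() and mclass_probe() are made-up stand-ins for the example and
are not the real nouveau/nvif API.

/*
 * Minimal user-space illustration of a first-match class probe, loosely
 * modelled on the mmus[] table in the patch.  Everything here is hypothetical.
 */
#include <stdio.h>

struct mclass {
	int oclass;	/* class identifier, 0 terminates the table */
	int version;	/* requested version, -1 for "any" */
};

/* Pretend the device only implements the two older classes. */
static int device_supports(int oclass)
{
	return oclass == 0x1050 || oclass == 0x1004;
}

/* Return the index of the first supported entry, or -1 if none match. */
static int mclass_probe(const struct mclass *list)
{
	for (int i = 0; list[i].oclass; i++) {
		if (device_supports(list[i].oclass))
			return i;
	}
	return -1;
}

int main(void)
{
	/* Ordered newest-first, like the mmus[] table in the patch. */
	static const struct mclass mmus[] = {
		{ 0x10c0, -1 },	/* stand-in for a "GF100"-style class (unsupported here) */
		{ 0x1050, -1 },	/* stand-in for an "NV50"-style class */
		{ 0x1004, -1 },	/* stand-in for an "NV04"-style class */
		{ 0 }		/* terminator */
	};
	int ret = mclass_probe(mmus);

	if (ret < 0) {
		fprintf(stderr, "no supported MMU class\n");
		return 1;
	}
	/* In the patch, this index is where mmus[ret].oclass feeds nvif_mmu_ctor(). */
	printf("constructing MMU with class 0x%04x\n", mmus[ret].oclass);
	return 0;
}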