On Mon, 4 Dec 2023 18:33:00 +0100
Boris Brezillon <boris.brezillon@xxxxxxxxxxxxx> wrote:

> +static int panthor_vm_prepare_unmap_op_ctx(struct panthor_vm_op_ctx *op_ctx,
> +                                           struct panthor_vm *vm,
> +                                           u64 va, u64 size)
> +{
> +        u32 pt_count = 0;
> +        int ret;
> +
> +        memset(op_ctx, 0, sizeof(*op_ctx));
> +        INIT_LIST_HEAD(&op_ctx->returned_vmas);
> +        op_ctx->va.range = size;
> +        op_ctx->va.addr = va;
> +        op_ctx->flags = DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP;
> +
> +        /* Pre-allocate L3 page tables to account for the split-2M-block
> +         * situation on unmap.
> +         */
> +        if (va != ALIGN(va, SZ_2M))
> +                pt_count++;
> +
> +        if (va + size != ALIGN(va + size, SZ_2M) &&
> +            ALIGN(va + size, SZ_2M) != ALIGN(va, SZ_2M))
> +                pt_count++;
> +
> +        if (pt_count) {
> +                op_ctx->rsvd_page_tables.pages = kcalloc(pt_count,
> +                                                         sizeof(*op_ctx->rsvd_page_tables.pages),
> +                                                         GFP_KERNEL);
> +                if (!op_ctx->rsvd_page_tables.pages)
> +                        goto err_cleanup;
> +
> +                ret = kmem_cache_alloc_bulk(pt_cache, GFP_KERNEL, pt_count,
> +                                            op_ctx->rsvd_page_tables.pages);
> +                if (ret != pt_count) {
> +                        ret = -ENOMEM;
> +                        goto err_cleanup;
> +                }
> +                op_ctx->rsvd_page_tables.count = pt_count;
> +        }

We also need to allocate {prev,next}_vma objects here, because partial
unmaps will call sm_step_remap(), which leads to a NULL deref if we
don't pre-allocate these objects.

I'm reworking the code to have a 3-entry array of preallocated VMAs
instead of the ->map.{prev,new,next}_vma fields. We can then populate
this array when preparing an op_ctx, and pick from this preallocated
pool in sm_step_[re]map(). A rough sketch of what I have in mind is at
the end of this mail.

> +
> +        return 0;
> +
> +err_cleanup:
> +        panthor_vm_cleanup_op_ctx(op_ctx, vm);
> +        return ret;
> +}
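
Something along these lines, just to illustrate the idea (this is a
sketch, not the final patch: the panthor_vma type name, the helper
names and the per-op VMA counts below are placeholders):

/* Hypothetical replacement for the ->map.{prev,new,next}_vma fields:
 * a small pool of VMAs preallocated at op_ctx preparation time, that
 * sm_step_map()/sm_step_remap() can then pick from without having to
 * allocate in the fence-signalling path.
 */

#define PANTHOR_VM_OP_MAX_VMAS        3

struct panthor_vm_op_ctx {
        /* ... existing fields (returned_vmas, va, flags, rsvd_page_tables, ...) ... */

        /* Worst case is 3 VMAs: prev, new and next. */
        struct panthor_vma *preallocated_vmas[PANTHOR_VM_OP_MAX_VMAS];
};

/* Called from the prepare_{map,unmap}_op_ctx() helpers, outside the
 * dma-fence signalling section, so GFP_KERNEL is fine here.
 */
static int panthor_vm_op_ctx_prealloc_vmas(struct panthor_vm_op_ctx *op_ctx,
                                           u32 vma_count)
{
        u32 i;

        for (i = 0; i < vma_count; i++) {
                struct panthor_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);

                if (!vma)
                        return -ENOMEM;

                op_ctx->preallocated_vmas[i] = vma;
        }

        return 0;
}

/* Called from sm_step_map()/sm_step_remap(): take one VMA out of the
 * pool. Returning NULL here would be a driver bug, since the prepare
 * step is supposed to have allocated enough VMAs for the worst case.
 */
static struct panthor_vma *
panthor_vm_op_ctx_get_vma(struct panthor_vm_op_ctx *op_ctx)
{
        u32 i;

        for (i = 0; i < ARRAY_SIZE(op_ctx->preallocated_vmas); i++) {
                struct panthor_vma *vma = op_ctx->preallocated_vmas[i];

                if (vma) {
                        op_ctx->preallocated_vmas[i] = NULL;
                        return vma;
                }
        }

        return NULL;
}

panthor_vm_prepare_unmap_op_ctx() would then preallocate 2 VMAs (a
partial unmap can remap both a prev and a next region), the map path
would preallocate 1 for the new mapping, and panthor_vm_cleanup_op_ctx()
would free whatever is left in the pool.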