Steven Price <steven.price@xxxxxxx> writes:

> +static int alloc_rec_aux(struct page **aux_pages,
> +			 u64 *aux_phys_pages,
> +			 unsigned int num_aux)
> +{
> +	int ret;
> +	unsigned int i;
> +
> +	for (i = 0; i < num_aux; i++) {
> +		struct page *aux_page;
> +		phys_addr_t aux_page_phys;
> +
> +		aux_page = alloc_page(GFP_KERNEL);
> +		if (!aux_page) {
> +			ret = -ENOMEM;
> +			goto out_err;
> +		}
> +		aux_page_phys = page_to_phys(aux_page);
> +		if (rmi_granule_delegate(aux_page_phys)) {
> +			__free_page(aux_page);
> +			ret = -ENXIO;
> +			goto out_err;
> +		}
> +		aux_pages[i] = aux_page;
> +		aux_phys_pages[i] = aux_page_phys;
> +	}
> +
> +	return 0;
> +out_err:
> +	free_rec_aux(aux_pages, i);
> +	return ret;
> +}
>

Could we possibly switch the above to the alloc/free helpers, so that all
granule allocation is done via alloc_delegated_granule()? Something like:

static int alloc_rec_aux(struct realm *realm,
			 u64 *aux_phys_pages,
			 unsigned int num_aux)
{
	int ret;
	unsigned int i;

	for (i = 0; i < num_aux; i++) {
		phys_addr_t aux_page_phys;

		aux_page_phys = alloc_delegated_granule(realm, NULL, GFP_KERNEL);
		if (aux_page_phys == PHYS_ADDR_MAX) {
			ret = -ENOMEM;
			goto out_err;
		}

		aux_phys_pages[i] = aux_page_phys;
	}

	return 0;
out_err:
	free_rec_aux(realm, aux_phys_pages, i);
	return ret;
}
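
For the error path above to work, free_rec_aux() would need the matching
change so that undelegation also goes through the common helper rather
than rmi_granule_undelegate()/__free_page() directly. A rough sketch of
what I have in mind, assuming a free_delegated_granule(realm, phys)
counterpart exists (the exact name/signature would need to follow
whatever free helper the series ends up providing):

static void free_rec_aux(struct realm *realm,
			 u64 *aux_phys_pages,
			 unsigned int num_aux)
{
	unsigned int i;

	/* Undelegate and free each aux granule via the common helper */
	for (i = 0; i < num_aux; i++)
		free_delegated_granule(realm, aux_phys_pages[i]);
}

That would also drop the need to track the struct page array separately,
since only the physical addresses are kept.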