On 2019-08-16 20:58, Dan Carpenter wrote:
On Fri, Aug 16, 2019 at 10:25:49AM +0800, Zhao Yakui wrote:
+int hugepage_map_guest(struct acrn_vm *vm, struct vm_memmap *memmap)
+{
+	struct page *page = NULL, *regions_buf_pg = NULL;
+	unsigned long len, guest_gpa, vma;
+	struct vm_memory_region *region_array;
+	struct set_regions *regions;
+	int max_size = PAGE_SIZE / sizeof(struct vm_memory_region);
+	int ret;
+
+	if (!vm || !memmap)
+		return -EINVAL;
+
+	len = memmap->len;
+	vma = memmap->vma_base;
+	guest_gpa = memmap->gpa;
+
+	/* prepare set_memory_regions info */
+	regions_buf_pg = alloc_page(GFP_KERNEL);
+	if (!regions_buf_pg)
+		return -ENOMEM;
+
+	regions = kzalloc(sizeof(*regions), GFP_KERNEL);
+	if (!regions) {
+		__free_page(regions_buf_pg);
+		return -ENOMEM;
It's better to do a goto err_free_regions_buf here. More comments
below.
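I.e., as a minimal sketch of that change (note that "ret" has to be set
before the jump, since we no longer return -ENOMEM directly):

	regions = kzalloc(sizeof(*regions), GFP_KERNEL);
	if (!regions) {
		ret = -ENOMEM;
		goto err_free_regions_buf;
	}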
+	}
+	regions->mr_num = 0;
+	regions->vmid = vm->vmid;
+	regions->regions_gpa = page_to_phys(regions_buf_pg);
+	region_array = page_to_virt(regions_buf_pg);
+
+	while (len > 0) {
+		unsigned long vm0_gpa, pagesize;
+
+		ret = get_user_pages_fast(vma, 1, 1, &page);
+		if (unlikely(ret != 1) || (!page)) {
+			pr_err("failed to pin huge page!\n");
+			ret = -ENOMEM;
+			goto err;
goto err is a red flag. It's better if error labels do one specific
named thing like:
err_regions:
	kfree(regions);
err_free_regions_buf:
	__free_page(regions_buf_pg);
We should unwind in the opposite/mirror order from how things were
allocated. Then we can remove the if statements in the error handling.
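As a sketch, the tail of the function would then look like this (the
labels fall through, freeing in the reverse of allocation order, and
the "err:" label with its NULL checks goes away entirely):

	__free_page(regions_buf_pg);
	kfree(regions);

	return 0;

err_regions:
	kfree(regions);
err_free_regions_buf:
	__free_page(regions_buf_pg);
	return ret;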
Thanks for the review.
Will follow your suggestion to unwind the error handling.
In this situation, say the user triggers an -EFAULT in
get_user_pages_fast() in the second iteration through the loop. That
means that "page" is the non-NULL page from the previous iteration. We
have already added it to the map with add_guest_map(). But now we're
freeing it without removing it from the map, so it probably leads to a
use-after-free.
The best way to write the error handling in a loop like this is to
clean up the partial iteration that has succeeded (nothing here), and
then unwind all the successful iterations at the bottom of the
function.
"goto unwind_loop;"
In theory we should clean up the previous successful iterations when we
hit an error in the current iteration. But it would be quite complex to
unwind those iterations (sketched below):
 - call set_memory_regions with the MR_DEL op,
 - call remove_guest_map for the added hash item,
 - call put_page for the page returned by get_user_pages_fast.
In fact, as this driver is mainly used for embedded IoT, it doesn't
handle this complex cleanup when such an error is encountered. Instead,
the cleanup is handled in free_guest_vm.
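To illustrate the point, the unwind_loop body would have to do roughly
this (just a sketch of the complexity; the exact arguments of
remove_guest_map and the MR_DEL batching through set_memory_regions are
elided):

unwind_loop:
	/* for each huge page mapped by the earlier iterations, in reverse: */
	/*   - submit an MR_DEL region via set_memory_regions()             */
	/*   - remove_guest_map() to drop the hash item added above         */
	/*   - put_page() to unpin the page from get_user_pages_fast()      */
	kfree(regions);
	__free_page(regions_buf_pg);
	return ret;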
+		}
+
+		vm0_gpa = page_to_phys(page);
+		pagesize = PAGE_SIZE << compound_order(page);
+
+		ret = add_guest_map(vm, vm0_gpa, guest_gpa, pagesize);
+		if (ret < 0) {
+			pr_err("failed to add memseg for huge page!\n");
+			goto err;
So then here, it would be:
pr_err("failed to add memseg for huge page!\n");
put_page(page);
goto unwind_loop;
regards,
dan carpenter
+		}
+
+		/* fill each memory region into region_array */
+		region_array[regions->mr_num].type = MR_ADD;
+		region_array[regions->mr_num].gpa = guest_gpa;
+		region_array[regions->mr_num].vm0_gpa = vm0_gpa;
+		region_array[regions->mr_num].size = pagesize;
+		region_array[regions->mr_num].prot =
+			(MEM_TYPE_WB & MEM_TYPE_MASK) |
+			(memmap->prot & MEM_ACCESS_RIGHT_MASK);
+		regions->mr_num++;
+		if (regions->mr_num == max_size) {
+			pr_debug("region buffer full, set & renew regions!\n");
+			ret = set_memory_regions(regions);
+			if (ret < 0) {
+				pr_err("failed to set regions,ret=%d!\n", ret);
+				goto err;
+			}
+			regions->mr_num = 0;
+		}
+
+		len -= pagesize;
+		vma += pagesize;
+		guest_gpa += pagesize;
+	}
+
+	ret = set_memory_regions(regions);
+	if (ret < 0) {
+		pr_err("failed to set regions, ret=%d!\n", ret);
+		goto err;
+	}
+
+	__free_page(regions_buf_pg);
+	kfree(regions);
+
+	return 0;
+err:
+	if (regions_buf_pg)
+		__free_page(regions_buf_pg);
+	if (page)
+		put_page(page);
+	kfree(regions);
+	return ret;
+}
+