From: Nuno Das Neves <nunodasneves@xxxxxxxxxxxxxxxxxxx> Sent: Friday, March 14, 2025 12:29 PM
>
> Provide a set of IOCTLs for creating and managing child partitions when
> running as root partition on Hyper-V. The new driver is enabled via
> CONFIG_MSHV_ROOT.
>
[snip]

> +
> +int
> +hv_call_clear_virtual_interrupt(u64 partition_id)
> +{
> +	unsigned long flags;

This local variable is now unused and will generate a compile warning.

> +	int status;
> +
> +	status = hv_do_fast_hypercall8(HVCALL_CLEAR_VIRTUAL_INTERRUPT,
> +				       partition_id);
> +
> +	return hv_result_to_errno(status);
> +}
> +
> +int
> +hv_call_create_port(u64 port_partition_id, union hv_port_id port_id,
> +		    u64 connection_partition_id,
> +		    struct hv_port_info *port_info,
> +		    u8 port_vtl, u8 min_connection_vtl, int node)
> +{
> +	struct hv_input_create_port *input;
> +	unsigned long flags;
> +	int ret = 0;
> +	int status;
> +
> +	do {
> +		local_irq_save(flags);
> +		input = *this_cpu_ptr(hyperv_pcpu_input_arg);
> +		memset(input, 0, sizeof(*input));
> +
> +		input->port_partition_id = port_partition_id;
> +		input->port_id = port_id;
> +		input->connection_partition_id = connection_partition_id;
> +		input->port_info = *port_info;
> +		input->port_vtl = port_vtl;
> +		input->min_connection_vtl = min_connection_vtl;
> +		input->proximity_domain_info = hv_numa_node_to_pxm_info(node);
> +		status = hv_do_hypercall(HVCALL_CREATE_PORT, input, NULL);
> +		local_irq_restore(flags);
> +		if (hv_result_success(status))
> +			break;
> +
> +		if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
> +			ret = hv_result_to_errno(status);
> +			break;
> +		}
> +		ret = hv_call_deposit_pages(NUMA_NO_NODE, port_partition_id, 1);
> +
> +	} while (!ret);
> +
> +	return ret;
> +}
> +
> +int
> +hv_call_delete_port(u64 port_partition_id, union hv_port_id port_id)
> +{
> +	union hv_input_delete_port input = { 0 };
> +	unsigned long flags;

Unused local variable.
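
Since hv_do_fast_hypercall16() passes its arguments in registers and never
touches the per-cpu hypercall input page, nothing here needs interrupts
disabled, so the declaration can simply be dropped. A minimal sketch of the
declarations I'd expect (assuming nothing else in this function ends up
needing local_irq_save/restore):

	union hv_input_delete_port input = { 0 };
	int status;

The same one-line deletion applies in hv_call_clear_virtual_interrupt()
above and in hv_call_disconnect_port() and hv_call_notify_port_ring_empty()
below.
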
> +	int status;
> +
> +	input.port_partition_id = port_partition_id;
> +	input.port_id = port_id;
> +	status = hv_do_fast_hypercall16(HVCALL_DELETE_PORT,
> +					input.as_uint64[0],
> +					input.as_uint64[1]);
> +
> +	return hv_result_to_errno(status);
> +}
> +
> +int
> +hv_call_connect_port(u64 port_partition_id, union hv_port_id port_id,
> +		     u64 connection_partition_id,
> +		     union hv_connection_id connection_id,
> +		     struct hv_connection_info *connection_info,
> +		     u8 connection_vtl, int node)
> +{
> +	struct hv_input_connect_port *input;
> +	unsigned long flags;
> +	int ret = 0, status;
> +
> +	do {
> +		local_irq_save(flags);
> +		input = *this_cpu_ptr(hyperv_pcpu_input_arg);
> +		memset(input, 0, sizeof(*input));
> +		input->port_partition_id = port_partition_id;
> +		input->port_id = port_id;
> +		input->connection_partition_id = connection_partition_id;
> +		input->connection_id = connection_id;
> +		input->connection_info = *connection_info;
> +		input->connection_vtl = connection_vtl;
> +		input->proximity_domain_info = hv_numa_node_to_pxm_info(node);
> +		status = hv_do_hypercall(HVCALL_CONNECT_PORT, input, NULL);
> +
> +		local_irq_restore(flags);
> +		if (hv_result_success(status))
> +			break;
> +
> +		if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
> +			ret = hv_result_to_errno(status);
> +			break;
> +		}
> +		ret = hv_call_deposit_pages(NUMA_NO_NODE,
> +					    connection_partition_id, 1);
> +	} while (!ret);
> +
> +	return ret;
> +}
> +
> +int
> +hv_call_disconnect_port(u64 connection_partition_id,
> +			union hv_connection_id connection_id)
> +{
> +	union hv_input_disconnect_port input = { 0 };
> +	unsigned long flags;

Unused local variable.

> +	int status;
> +
> +	input.connection_partition_id = connection_partition_id;
> +	input.connection_id = connection_id;
> +	input.is_doorbell = 1;
> +	status = hv_do_fast_hypercall16(HVCALL_DISCONNECT_PORT,
> +					input.as_uint64[0],
> +					input.as_uint64[1]);
> +
> +	return hv_result_to_errno(status);
> +}
> +
> +int
> +hv_call_notify_port_ring_empty(u32 sint_index)
> +{
> +	union hv_input_notify_port_ring_empty input = { 0 };
> +	unsigned long flags;

Unused.

> +	int status;
> +
> +	input.sint_index = sint_index;
> +	status = hv_do_fast_hypercall8(HVCALL_NOTIFY_PORT_RING_EMPTY,
> +				       input.as_uint64);
> +
> +	return hv_result_to_errno(status);
> +}
> +
> +int hv_call_map_stat_page(enum hv_stats_object_type type,
> +			  const union hv_stats_object_identity *identity,
> +			  void **addr)
> +{
> +	unsigned long flags;
> +	struct hv_input_map_stats_page *input;
> +	struct hv_output_map_stats_page *output;
> +	u64 status, pfn;
> +	int ret;
> +
> +	do {
> +		local_irq_save(flags);
> +		input = *this_cpu_ptr(hyperv_pcpu_input_arg);
> +		output = *this_cpu_ptr(hyperv_pcpu_output_arg);
> +
> +		memset(input, 0, sizeof(*input));
> +		input->type = type;
> +		input->identity = *identity;
> +
> +		status = hv_do_hypercall(HVCALL_MAP_STATS_PAGE, input, output);
> +		pfn = output->map_location;
> +
> +		local_irq_restore(flags);
> +		if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
> +			if (hv_result_success(status))
> +				break;

If this "break" is taken, "ret" may be uninitialized and the return value is
stack garbage. This bug was also in v5 and I didn't notice it in my previous
review.
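
One way to fix it is to initialize "ret" at its declaration (a sketch only;
explicitly setting ret = 0 on the success path before the "break" would work
just as well):

	u64 status, pfn;
	int ret = 0;

With that, breaking out on a successful HVCALL_MAP_STATS_PAGE returns 0 after
*addr is set, while the other two exits still return the translated hypercall
status or the hv_call_deposit_pages() error.
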
> +			return hv_result_to_errno(status);
> +		}
> +
> +		ret = hv_call_deposit_pages(NUMA_NO_NODE,
> +					    hv_current_partition_id, 1);
> +		if (ret)
> +			return ret;
> +	} while (!ret);
> +
> +	*addr = page_address(pfn_to_page(pfn));
> +
> +	return ret;
> +}
> +
> +int hv_call_unmap_stat_page(enum hv_stats_object_type type,
> +			    const union hv_stats_object_identity *identity)
> +{
> +	unsigned long flags;
> +	struct hv_input_unmap_stats_page *input;
> +	u64 status;
> +
> +	local_irq_save(flags);
> +	input = *this_cpu_ptr(hyperv_pcpu_input_arg);
> +
> +	memset(input, 0, sizeof(*input));
> +	input->type = type;
> +	input->identity = *identity;
> +
> +	status = hv_do_hypercall(HVCALL_UNMAP_STATS_PAGE, input, NULL);
> +	local_irq_restore(flags);
> +
> +	return hv_result_to_errno(status);
> +}
> +
> +int hv_call_modify_spa_host_access(u64 partition_id, struct page **pages,
> +				   u64 page_struct_count, u32 host_access,
> +				   u32 flags, u8 acquire)
> +{
> +	struct hv_input_modify_sparse_spa_page_host_access *input_page;
> +	u64 status;
> +	int done = 0;
> +	unsigned long irq_flags, large_shift = 0;
> +	u64 page_count = page_struct_count;
> +	u16 code = acquire ? HVCALL_ACQUIRE_SPARSE_SPA_PAGE_HOST_ACCESS :
> +			HVCALL_RELEASE_SPARSE_SPA_PAGE_HOST_ACCESS;
> +
> +	if (page_count == 0)
> +		return -EINVAL;
> +
> +	if (flags & HV_MODIFY_SPA_PAGE_HOST_ACCESS_LARGE_PAGE) {
> +		if (!HV_PAGE_COUNT_2M_ALIGNED(page_count))
> +			return -EINVAL;
> +		large_shift = HV_HYP_LARGE_PAGE_SHIFT - HV_HYP_PAGE_SHIFT;
> +		page_count >>= large_shift;
> +	}
> +
> +	while (done < page_count) {
> +		ulong i, completed, remain = page_count - done;
> +		int rep_count = min(remain,
> +			HV_MODIFY_SPARSE_SPA_PAGE_HOST_ACCESS_MAX_PAGE_COUNT);
> +
> +		local_irq_save(irq_flags);
> +		input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);
> +
> +		memset(input_page, 0, sizeof(*input_page));
> +		/* Only set the partition id if you are making the pages
> +		 * exclusive
> +		 */
> +		if (flags & HV_MODIFY_SPA_PAGE_HOST_ACCESS_MAKE_EXCLUSIVE)
> +			input_page->partition_id = partition_id;
> +		input_page->flags = flags;
> +		input_page->host_access = host_access;
> +
> +		for (i = 0; i < rep_count; i++) {
> +			u64 index = (done + i) << large_shift;
> +
> +			if (index >= page_struct_count)
> +				return -EINVAL;
> +
> +			input_page->spa_page_list[i] =
> +				page_to_pfn(pages[index]);
> +		}
> +
> +		status = hv_do_rep_hypercall(code, rep_count, 0, input_page,
> +					     NULL);
> +		local_irq_restore(irq_flags);
> +
> +		completed = hv_repcomp(status);
> +
> +		if (!hv_result_success(status))
> +			return hv_result_to_errno(status);
> +
> +		done += completed;
> +	}
> +
> +	return 0;
> +}

Stopping here because that's where I stopped in Part 1 of my v5 review of
this patch.

Michael