> From: Brett Creeley <brett.creeley@xxxxxxx>
> Sent: Thursday, July 20, 2023 6:35 AM
>
> PDS_LM_CMD_STATUS is added to determine the exact size of the VF
> device state data.

Based on the description, PDS_LM_CMD_STATE_SIZE is clearer.

> --- a/drivers/vfio/pci/pds/Makefile
> +++ b/drivers/vfio/pci/pds/Makefile
> @@ -5,5 +5,6 @@ obj-$(CONFIG_PDS_VFIO_PCI) += pds-vfio-pci.o
>
>  pds-vfio-pci-y := \
>  	cmds.o \
> +	lm.o \

nit: "migration.o" is more readable.

> +static int pds_vfio_client_adminq_cmd(struct pds_vfio_pci_device *pds_vfio,
> +				      union pds_core_adminq_cmd *req,
> +				      union pds_core_adminq_comp *resp,
> +				      bool fast_poll)
> +{
> +	union pds_core_adminq_cmd cmd = {};
> +	int err;
> +
> +	/* Wrap the client request */
> +	cmd.client_request.opcode = PDS_AQ_CMD_CLIENT_CMD;
> +	cmd.client_request.client_id = cpu_to_le16(pds_vfio->client_id);
> +	memcpy(cmd.client_request.client_cmd, req,
> +	       sizeof(cmd.client_request.client_cmd));
> +
> +	err = pdsc_adminq_post(pds_vfio->pdsc, &cmd, resp, fast_poll);
> +	if (err && err != -EAGAIN)
> +		dev_info(pds_vfio_to_dev(pds_vfio),
> +			 "client admin cmd failed: %pe\n", ERR_PTR(err));

dev_err()

> +void pds_vfio_send_host_vf_lm_status_cmd(struct pds_vfio_pci_device *pds_vfio,
> +					 enum pds_lm_host_vf_status vf_status)
> +{
> +	union pds_core_adminq_cmd cmd = {
> +		.lm_host_vf_status = {
> +			.opcode = PDS_LM_CMD_HOST_VF_STATUS,
> +			.vf_id = cpu_to_le16(pds_vfio->vf_id),
> +			.status = vf_status,
> +		},
> +	};
> +	struct device *dev = pds_vfio_to_dev(pds_vfio);
> +	union pds_core_adminq_comp comp = {};
> +	int err;
> +
> +	dev_dbg(dev, "vf%u: Set host VF LM status: %u", pds_vfio->vf_id,
> +		vf_status);
> +	if (vf_status != PDS_LM_STA_IN_PROGRESS &&
> +	    vf_status != PDS_LM_STA_NONE) {
> +		dev_warn(dev, "Invalid host VF migration status, %d\n",
> +			 vf_status);
> +		return;
> +	}

WARN_ON() as it's a driver bug if an unsupported status code is passed in.
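i.e. something like the below (untested sketch, just to show the idea; the
WARN_ON() splat already identifies the bad caller, so the dev_warn() can go):

	if (WARN_ON(vf_status != PDS_LM_STA_IN_PROGRESS &&
		    vf_status != PDS_LM_STA_NONE))
		return;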
> +
> +static struct pds_vfio_lm_file *
> +pds_vfio_get_lm_file(const struct file_operations *fops, int flags, u64 size)
> +{
> +	struct pds_vfio_lm_file *lm_file = NULL;
> +	unsigned long long npages;
> +	struct page **pages;
> +	void *page_mem;
> +	const void *p;
> +
> +	if (!size)
> +		return NULL;
> +
> +	/* Alloc file structure */
> +	lm_file = kzalloc(sizeof(*lm_file), GFP_KERNEL);
> +	if (!lm_file)
> +		return NULL;
> +
> +	/* Create file */
> +	lm_file->filep =
> +		anon_inode_getfile("pds_vfio_lm", fops, lm_file, flags);
> +	if (!lm_file->filep)
> +		goto out_free_file;
> +
> +	stream_open(lm_file->filep->f_inode, lm_file->filep);
> +	mutex_init(&lm_file->lock);
> +
> +	/* prevent file from being released before we are done with it */
> +	get_file(lm_file->filep);
> +
> +	/* Allocate memory for file pages */
> +	npages = DIV_ROUND_UP_ULL(size, PAGE_SIZE);
> +	pages = kmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
> +	if (!pages)
> +		goto out_put_file;
> +
> +	page_mem = kvzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);
> +	if (!page_mem)
> +		goto out_free_pages_array;
> +
> +	p = page_mem - offset_in_page(page_mem);
> +	for (unsigned long long i = 0; i < npages; i++) {
> +		if (is_vmalloc_addr(p))
> +			pages[i] = vmalloc_to_page(p);
> +		else
> +			pages[i] = kmap_to_page((void *)p);
> +		if (!pages[i])
> +			goto out_free_page_mem;
> +
> +		p += PAGE_SIZE;
> +	}
> +
> +	/* Create scatterlist of file pages to use for DMA mapping later */
> +	if (sg_alloc_table_from_pages(&lm_file->sg_table, pages, npages, 0,
> +				      size, GFP_KERNEL))
> +		goto out_free_page_mem;
> +
> +	lm_file->size = size;
> +	lm_file->pages = pages;
> +	lm_file->npages = npages;
> +	lm_file->page_mem = page_mem;
> +	lm_file->alloc_size = npages * PAGE_SIZE;
> +
> +	return lm_file;
> +
> +out_free_page_mem:
> +	kvfree(page_mem);
> +out_free_pages_array:
> +	kfree(pages);
> +out_put_file:
> +	fput(lm_file->filep);
> +	mutex_destroy(&lm_file->lock);
> +out_free_file:
> +	kfree(lm_file);
> +
> +	return NULL;
> +}

I wonder whether the logic about the migration file can be generalized. It's
not very maintainable to have every migration driver implementing its own
code for similar functions. Did I overlook any device-specific setup required
here?
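If not, a shared helper in the vfio core along these lines might be enough
(purely a hypothetical sketch, none of the names below exist today):

/*
 * Hypothetical common helper: the anon inode, page array and sg_table
 * setup would live in one place instead of being duplicated per driver.
 */
struct vfio_mig_file *vfio_alloc_mig_file(const struct file_operations *fops,
					  int flags, u64 size);

/* The driver side would then reduce to something like: */
	lm_file = vfio_alloc_mig_file(&pds_vfio_save_fops, O_RDONLY, size);
	if (!lm_file)
		return -ENOMEM;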