> >> + vpmem->vdev = vdev; > >> + vdev->priv = vpmem; > >> + err = init_vq(vpmem); > >> + if (err) { > >> + dev_err(&vdev->dev, "failed to initialize virtio pmem vq's\n"); > >> + goto out_err; > >> + } > >> + > >> + virtio_cread(vpmem->vdev, struct virtio_pmem_config, > >> + start, &vpmem->start); > >> + virtio_cread(vpmem->vdev, struct virtio_pmem_config, > >> + size, &vpmem->size); > >> + > >> + res.start = vpmem->start; > >> + res.end = vpmem->start + vpmem->size-1; > > > > nit: " - 1;" > > > >> + vpmem->nd_desc.provider_name = "virtio-pmem"; > >> + vpmem->nd_desc.module = THIS_MODULE; > >> + > >> + vpmem->nvdimm_bus = nvdimm_bus_register(&vdev->dev, > >> + &vpmem->nd_desc); > >> + if (!vpmem->nvdimm_bus) { > >> + dev_err(&vdev->dev, "failed to register device with nvdimm_bus\n"); > >> + err = -ENXIO; > >> + goto out_vq; > >> + } > >> + > >> + dev_set_drvdata(&vdev->dev, vpmem->nvdimm_bus); > >> + > >> + ndr_desc.res = &res; > >> + ndr_desc.numa_node = nid; > >> + ndr_desc.flush = async_pmem_flush; > >> + set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags); > >> + set_bit(ND_REGION_ASYNC, &ndr_desc.flags); > >> + nd_region = nvdimm_pmem_region_create(vpmem->nvdimm_bus, &ndr_desc); > >> + if (!nd_region) { > >> + dev_err(&vdev->dev, "failed to create nvdimm region\n"); > >> + err = -ENXIO; > >> + goto out_nd; > >> + } > >> + nd_region->provider_data = dev_to_virtio(nd_region->dev.parent->parent); > >> + return 0; > >> +out_nd: > >> + nvdimm_bus_unregister(vpmem->nvdimm_bus); > >> +out_vq: > >> + vdev->config->del_vqs(vdev); > >> +out_err: > >> + return err; > >> +} > >> + > >> +static void virtio_pmem_remove(struct virtio_device *vdev) > >> +{ > >> + struct nvdimm_bus *nvdimm_bus = dev_get_drvdata(&vdev->dev); > >> + > >> + nvdimm_bus_unregister(nvdimm_bus); > >> + vdev->config->del_vqs(vdev); > >> + vdev->config->reset(vdev); > >> +} > >> + > >> +static struct virtio_driver virtio_pmem_driver = { > >> + .driver.name = KBUILD_MODNAME, > >> + .driver.owner = THIS_MODULE, 
> >> + .id_table = id_table, > >> + .probe = virtio_pmem_probe, > >> + .remove = virtio_pmem_remove, > >> +}; > >> + > >> +module_virtio_driver(virtio_pmem_driver); > >> +MODULE_DEVICE_TABLE(virtio, id_table); > >> +MODULE_DESCRIPTION("Virtio pmem driver"); > >> +MODULE_LICENSE("GPL"); > >> diff --git a/drivers/nvdimm/virtio_pmem.h b/drivers/nvdimm/virtio_pmem.h > >> new file mode 100644 > >> index 000000000000..ab1da877575d > >> --- /dev/null > >> +++ b/drivers/nvdimm/virtio_pmem.h > >> @@ -0,0 +1,60 @@ > >> +/* SPDX-License-Identifier: GPL-2.0 */ > >> +/* > >> + * virtio_pmem.h: virtio pmem Driver > >> + * > >> + * Discovers persistent memory range information > >> + * from host and provides a virtio based flushing > >> + * interface. > >> + **/ > >> + > >> +#ifndef _LINUX_VIRTIO_PMEM_H > >> +#define _LINUX_VIRTIO_PMEM_H > >> + > >> +#include <linux/virtio_ids.h> > >> +#include <linux/module.h> > >> +#include <linux/virtio_config.h> > >> +#include <uapi/linux/virtio_pmem.h> > >> +#include <linux/libnvdimm.h> > >> +#include <linux/spinlock.h> > >> + > >> +struct virtio_pmem_request { > >> + /* Host return status corresponding to flush request */ > >> + int ret; > >> + > >> + /* command name*/ > >> + char name[16]; > > > > So ... why are we sending string commands and expect native-endianess > > integers and don't define a proper request/response structure + request > > types in include/uapi/linux/virtio_pmem.h like > > > > struct virtio_pmem_resp { > > __virtio32 ret; > > } > > FWIW, I wonder if we should even properly translate return values and > define types like > > VIRTIO_PMEM_RESP_TYPE_OK 0 > VIRTIO_PMEM_RESP_TYPE_EIO 1 Don't think these are required as only failure and success return types easy to understand. Thanks, Pankaj > > .. > > > > > #define VIRTIO_PMEM_REQ_TYPE_FLUSH 1 > > struct virtio_pmem_req { > > __virtio16 type; > > } > > > > ... 
and this way we also define a proper endianness format for exchange > > and keep it extensible > > > > @MST, what's your take on this? > > > > > > > -- > > Thanks, > > David / dhildenb > _______________________________________________ Virtualization mailing list Virtualization@xxxxxxxxxxxxxxxxxxxxxxxxxx https://lists.linuxfoundation.org/mailman/listinfo/virtualization