> +
> +	if (epc_features->bar[BAR_0].type == BAR_FIXED) {
> +		if (reg_bar_size > epc_features->bar[BAR_0].fixed_size) {
> +			dev_err(&epf->dev,
> +				"Reg BAR 0 size %llu B too small, need %zu B\n",
> +				epc_features->bar[BAR_0].fixed_size,
> +				reg_bar_size);
> +			return -ENOMEM;
> +		}
> +		reg_bar_size = epc_features->bar[BAR_0].fixed_size;
> +	}
> +
> +	nvme_epf->reg_bar = pci_epf_alloc_space(epf, reg_bar_size, BAR_0,
> +						epc_features, PRIMARY_INTERFACE);
> +	if (!nvme_epf->reg_bar) {
> +		dev_err(&epf->dev, "Allocate BAR 0 failed\n");

The message would read better as 'Failed to allocate memory for BAR 0'.

> +		return -ENOMEM;
> +	}
> +	memset(nvme_epf->reg_bar, 0, reg_bar_size);
> +
> +	return 0;
> +}
> +
> +static void nvmet_pciep_epf_clear_bar(struct nvmet_pciep_epf *nvme_epf)
> +{
> +	struct pci_epf *epf = nvme_epf->epf;
> +
> +	pci_epc_clear_bar(epf->epc, epf->func_no, epf->vfunc_no,
> +			  &epf->bar[BAR_0]);
> +	pci_epf_free_space(epf, nvme_epf->reg_bar, BAR_0, PRIMARY_INTERFACE);
> +	nvme_epf->reg_bar = NULL;
> +}
> +
> +static int nvmet_pciep_epf_init_irq(struct nvmet_pciep_epf *nvme_epf)
> +{
> +	const struct pci_epc_features *epc_features = nvme_epf->epc_features;
> +	struct pci_epf *epf = nvme_epf->epf;
> +	int ret;
> +
> +	/* Enable MSI-X if supported, otherwise, use MSI */
> +	if (epc_features->msix_capable && epf->msix_interrupts) {
> +		ret = pci_epc_set_msix(epf->epc, epf->func_no, epf->vfunc_no,
> +				       epf->msix_interrupts, BAR_0,
> +				       nvme_epf->msix_table_offset);
> +		if (ret) {
> +			dev_err(&epf->dev, "MSI-X configuration failed\n");
> +			return ret;
> +		}
> +
> +		nvme_epf->nr_vectors = epf->msix_interrupts;
> +		nvme_epf->irq_type = PCI_IRQ_MSIX;
> +
> +		return 0;
> +	}
> +
> +	if (epc_features->msi_capable && epf->msi_interrupts) {
> +		ret = pci_epc_set_msi(epf->epc, epf->func_no, epf->vfunc_no,
> +				      epf->msi_interrupts);
> +		if (ret) {
> +			dev_err(&epf->dev, "MSI configuration failed\n");
> +			return ret;
> +		}
> +
> +		nvme_epf->nr_vectors = epf->msi_interrupts;
> +		nvme_epf->irq_type = PCI_IRQ_MSI;
> +
> +		return 0;
> +	}
> +
> +	/* MSI and MSI-X are not supported: fall back to INTX */
> +	nvme_epf->nr_vectors = 1;
> +	nvme_epf->irq_type = PCI_IRQ_INTX;
> +
> +	return 0;
> +}
> +
> +static int nvmet_pciep_epf_epc_init(struct pci_epf *epf)
> +{
> +	struct nvmet_pciep_epf *nvme_epf = epf_get_drvdata(epf);
> +	const struct pci_epc_features *epc_features = nvme_epf->epc_features;
> +	struct nvmet_pciep_ctrl *ctrl = &nvme_epf->ctrl;
> +	unsigned int max_nr_queues = NVMET_NR_QUEUES;
> +	int ret;
> +
> +	/*
> +	 * Cap the maximum number of queues we can support on the controller
> +	 * with the number of IRQs we can use.
> +	 */
> +	if (epc_features->msix_capable && epf->msix_interrupts) {
> +		dev_info(&epf->dev,
> +			 "PCI endpoint controller supports MSI-X, %u vectors\n",
> +			 epf->msix_interrupts);
> +		max_nr_queues = min(max_nr_queues, epf->msix_interrupts);
> +	} else if (epc_features->msi_capable && epf->msi_interrupts) {
> +		dev_info(&epf->dev,
> +			 "PCI endpoint controller supports MSI, %u vectors\n",
> +			 epf->msi_interrupts);
> +		max_nr_queues = min(max_nr_queues, epf->msi_interrupts);
> +	}
> +
> +	if (max_nr_queues < 2) {
> +		dev_err(&epf->dev, "Invalid maximum number of queues %u\n",
> +			max_nr_queues);
> +		return -EINVAL;
> +	}
> +
> +	/* Create the target controller. */
> +	ret = nvmet_pciep_create_ctrl(nvme_epf, max_nr_queues);
> +	if (ret) {
> +		dev_err(&epf->dev,
> +			"Create NVMe PCI target controller failed\n");

Same here: 'Failed to create NVMe PCI target controller' reads better.

> +		return ret;
> +	}
> +
> +	if (epf->vfunc_no <= 1) {

Are you really supporting virtual functions? If supported, 'vfunc_no < 1' is not
possible.

> +		/* Set device ID, class, etc */
> +		epf->header->vendorid = ctrl->tctrl->subsys->vendor_id;
> +		epf->header->subsys_vendor_id =
> +			ctrl->tctrl->subsys->subsys_vendor_id;

Why are these coming from somewhere else instead of being configured
within the EPF driver?
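
Something along the lines of a static header template owned by the
function driver (the usual pci-epf-test pattern) is what I would expect
here. Only a sketch, with a made-up symbol name and placeholder values
(the standard pci-ep configfs attributes can still override them):

static struct pci_epf_header nvmet_pciep_epf_pci_header = {
	.vendorid	  = PCI_ANY_ID,
	.deviceid	  = PCI_ANY_ID,
	.subsys_vendor_id = PCI_ANY_ID,
	.subsys_id	  = PCI_ANY_ID,
	.progif_code	  = 0x02,	/* NVM Express */
	.subclass_code	  = 0x08,	/* Non-Volatile Memory controller */
	.baseclass_code	  = PCI_BASE_CLASS_STORAGE,
	.interrupt_pin	  = PCI_INTERRUPT_INTA,
};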

> +		ret = pci_epc_write_header(epf->epc, epf->func_no, epf->vfunc_no,
> +					   epf->header);
> +		if (ret) {
> +			dev_err(&epf->dev,
> +				"Write configuration header failed %d\n", ret);
> +			goto out_destroy_ctrl;
> +		}
> +	}
> +
> +	/* Setup the PCIe BAR and create the controller */
> +	ret = pci_epc_set_bar(epf->epc, epf->func_no, epf->vfunc_no,
> +			      &epf->bar[BAR_0]);
> +	if (ret) {
> +		dev_err(&epf->dev, "Set BAR 0 failed\n");
> +		goto out_destroy_ctrl;
> +	}
> +
> +	/*
> +	 * Enable interrupts and start polling the controller BAR if we do not
> +	 * have any link up notifier.
> +	 */
> +	ret = nvmet_pciep_epf_init_irq(nvme_epf);
> +	if (ret)
> +		goto out_clear_bar;
> +
> +	if (!epc_features->linkup_notifier) {
> +		ctrl->link_up = true;
> +		nvmet_pciep_start_ctrl(&nvme_epf->ctrl);
> +	}
> +
> +	return 0;
> +
> +out_clear_bar:
> +	nvmet_pciep_epf_clear_bar(nvme_epf);
> +out_destroy_ctrl:
> +	nvmet_pciep_destroy_ctrl(&nvme_epf->ctrl);
> +	return ret;
> +}
> +
> +static void nvmet_pciep_epf_epc_deinit(struct pci_epf *epf)
> +{
> +	struct nvmet_pciep_epf *nvme_epf = epf_get_drvdata(epf);
> +	struct nvmet_pciep_ctrl *ctrl = &nvme_epf->ctrl;
> +
> +	ctrl->link_up = false;
> +	nvmet_pciep_destroy_ctrl(ctrl);
> +
> +	nvmet_pciep_epf_deinit_dma(nvme_epf);
> +	nvmet_pciep_epf_clear_bar(nvme_epf);
> +
> +	mutex_destroy(&nvme_epf->mmio_lock);
> +}
> +
> +static int nvmet_pciep_epf_link_up(struct pci_epf *epf)
> +{
> +	struct nvmet_pciep_epf *nvme_epf = epf_get_drvdata(epf);
> +	struct nvmet_pciep_ctrl *ctrl = &nvme_epf->ctrl;
> +
> +	dev_info(nvme_epf->ctrl.dev, "PCI link up\n");

These prints are supposed to come from the controller drivers, so there
is no need to have them here as well.

> +
> +	ctrl->link_up = true;
> +	nvmet_pciep_start_ctrl(ctrl);
> +
> +	return 0;
> +}
> +

[...]

> +static ssize_t nvmet_pciep_epf_dma_enable_show(struct config_item *item,
> +					    char *page)
> +{
> +	struct config_group *group = to_config_group(item);
> +	struct nvmet_pciep_epf *nvme_epf = to_nvme_epf(group);
> +
> +	return sysfs_emit(page, "%d\n", nvme_epf->dma_enable);
> +}
> +
> +static ssize_t nvmet_pciep_epf_dma_enable_store(struct config_item *item,
> +					     const char *page, size_t len)
> +{
> +	struct config_group *group = to_config_group(item);
> +	struct nvmet_pciep_epf *nvme_epf = to_nvme_epf(group);
> +	int ret;
> +
> +	if (nvme_epf->ctrl.tctrl)
> +		return -EBUSY;
> +
> +	ret = kstrtobool(page, &nvme_epf->dma_enable);
> +	if (ret)
> +		return ret;
> +
> +	return len;
> +}
> +
> +CONFIGFS_ATTR(nvmet_pciep_epf_, dma_enable);

What is the use of making this option user configurable? It is purely a
hardware capability, and I don't think users would voluntarily want
their NVMe device to work without DMA.
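
If the concern is platforms without a usable DMA engine, the driver can
probe for that by itself and silently fall back to mmio. A rough sketch
of what I mean, reusing the dma_enable field from this patch (the
init_dma helper and dma_chan field names are only assumptions):

static void nvmet_pciep_epf_init_dma(struct nvmet_pciep_epf *nvme_epf)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(chan)) {
		/* No memcpy channel available: fall back to mmio */
		nvme_epf->dma_enable = false;
		return;
	}

	nvme_epf->dma_chan = chan;
	nvme_epf->dma_enable = true;
}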

> +
> +static ssize_t nvmet_pciep_epf_portid_show(struct config_item *item, char *page)
> +{
> +	struct config_group *group = to_config_group(item);
> +	struct nvmet_pciep_epf *nvme_epf = to_nvme_epf(group);
> +
> +	return sysfs_emit(page, "%u\n", le16_to_cpu(nvme_epf->portid));
> +}
> +

[...]

> +static int __init nvmet_pciep_init_module(void)
> +{
> +	int ret;
> +
> +	ret = pci_epf_register_driver(&nvmet_pciep_epf_driver);
> +	if (ret)
> +		return ret;
> +
> +	ret = nvmet_register_transport(&nvmet_pciep_fabrics_ops);

What is the need to register the transport so early? You should consider
moving the registration to bind() so that the transport can be
unregistered once the driver is unbound from the controller.
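
Something like this, reusing the ops name from this patch (the
bind()/unbind() callback names are assumed and the rest of their work
is elided):

static int nvmet_pciep_epf_bind(struct pci_epf *epf)
{
	int ret;

	/* Register the PCI transport only while a function is bound */
	ret = nvmet_register_transport(&nvmet_pciep_fabrics_ops);
	if (ret)
		return ret;

	/* ... the rest of the existing bind() work ... */

	return 0;
}

static void nvmet_pciep_epf_unbind(struct pci_epf *epf)
{
	/* ... existing unbind() teardown ... */

	nvmet_unregister_transport(&nvmet_pciep_fabrics_ops);
}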

- Mani

-- 
மணிவண்ணன் சதாசிவம்



