On Fri, Aug 02, 2024 at 11:59:31AM +0200, Louis Peens wrote:
> From: Kyle Xu <zhenbing.xu@xxxxxxxxxxxx>
>
> Add a new kernel module 'nfp_vdpa' for the NFP vDPA networking driver.
>
> The vDPA driver initializes the necessary resources on the VF and the
> data path will be offloaded. It also implements the 'vdpa_config_ops'
> and the corresponding callback interfaces according to the requirement
> of kernel vDPA framework.
>
> Signed-off-by: Kyle Xu <zhenbing.xu@xxxxxxxxxxxx>
> Signed-off-by: Louis Peens <louis.peens@xxxxxxxxxxxx>

...

> diff --git a/drivers/vdpa/netronome/nfp_vdpa_main.c b/drivers/vdpa/netronome/nfp_vdpa_main.c

...

> +static int nfp_vdpa_map_resources(struct nfp_vdpa_net *ndev,
> +				  struct pci_dev *pdev,
> +				  const struct nfp_dev_info *dev_info)
> +{
> +	unsigned int bar_off, bar_sz, tx_bar_sz, rx_bar_sz;
> +	unsigned int max_tx_rings, max_rx_rings, txq, rxq;
> +	u64 tx_bar_off, rx_bar_off;
> +	resource_size_t map_addr;
> +	void __iomem *tx_bar;
> +	void __iomem *rx_bar;

Hi Kyle and Louis,

A minor nit from my side: rx_bar is set but otherwise unused in this
function.

> +	int err;
> +
> +	/* Map CTRL BAR */
> +	ndev->ctrl_bar = ioremap(pci_resource_start(pdev, NFP_NET_CTRL_BAR),
> +				 NFP_NET_CFG_BAR_SZ);
> +	if (!ndev->ctrl_bar)
> +		return -EIO;
> +
> +	/* Find out how many rings are supported */
> +	max_tx_rings = readl(ndev->ctrl_bar + NFP_NET_CFG_MAX_TXRINGS);
> +	max_rx_rings = readl(ndev->ctrl_bar + NFP_NET_CFG_MAX_RXRINGS);
> +	/* Currently, only one ring is supported */
> +	if (max_tx_rings != NFP_VDPA_QUEUE_RING_MAX || max_rx_rings != NFP_VDPA_QUEUE_RING_MAX) {
> +		err = -EINVAL;
> +		goto ctrl_bar_unmap;
> +	}
> +
> +	/* Map Q0_BAR as a single overlapping BAR mapping */
> +	tx_bar_sz = NFP_QCP_QUEUE_ADDR_SZ * max_tx_rings * NFP_VDPA_QUEUE_SPACE_STRIDE;
> +	rx_bar_sz = NFP_QCP_QUEUE_ADDR_SZ * max_rx_rings * NFP_VDPA_QUEUE_SPACE_STRIDE;
> +
> +	txq = readl(ndev->ctrl_bar + NFP_NET_CFG_START_TXQ);
> +	tx_bar_off = nfp_qcp_queue_offset(dev_info, txq);
> +	rxq = readl(ndev->ctrl_bar + NFP_NET_CFG_START_RXQ);
> +	rx_bar_off = nfp_qcp_queue_offset(dev_info, rxq);
> +
> +	bar_off = min(tx_bar_off, rx_bar_off);
> +	bar_sz = max(tx_bar_off + tx_bar_sz, rx_bar_off + rx_bar_sz);
> +	bar_sz -= bar_off;
> +
> +	map_addr = pci_resource_start(pdev, NFP_NET_Q0_BAR) + bar_off;
> +	ndev->q_bar = ioremap(map_addr, bar_sz);
> +	if (!ndev->q_bar) {
> +		err = -EIO;
> +		goto ctrl_bar_unmap;
> +	}
> +
> +	tx_bar = ndev->q_bar + (tx_bar_off - bar_off);
> +	rx_bar = ndev->q_bar + (rx_bar_off - bar_off);
> +
> +	/* TX queues */
> +	ndev->vring[txq].kick_addr = ndev->ctrl_bar + NFP_VDPA_NOTIFY_AREA_BASE
> +				     + txq * NFP_VDPA_QUEUE_NOTIFY_OFFSET;
> +	/* RX queues */
> +	ndev->vring[rxq].kick_addr = ndev->ctrl_bar + NFP_VDPA_NOTIFY_AREA_BASE
> +				     + rxq * NFP_VDPA_QUEUE_NOTIFY_OFFSET;
> +	/* Stash the re-configuration queue away. First odd queue in TX Bar */
> +	ndev->qcp_cfg = tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
> +
> +	return 0;
> +
> +ctrl_bar_unmap:
> +	iounmap(ndev->ctrl_bar);
> +	return err;
> +}

...
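
If rx_bar is needed by a later patch in the series, a short note in the
commit message would help; otherwise it looks like the local and its
assignment can simply be dropped. Purely an untested sketch of what I
mean, assuming nothing later depends on rx_bar (only the unused variable
removed, everything else kept as in your patch; rx_bar_off is of course
still needed for the bar_off/bar_sz calculation):

	void __iomem *tx_bar;
	int err;
	...
	tx_bar = ndev->q_bar + (tx_bar_off - bar_off);

	/* Stash the re-configuration queue away. First odd queue in TX Bar */
	ndev->qcp_cfg = tx_bar + NFP_QCP_QUEUE_ADDR_SZ;

That should also keep W=1 builds quiet about a set-but-unused variable.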