Reviewed-by: Alistair Popple <alistair@xxxxxxxxxxxx>

On Tuesday, 13 November 2018 7:28:06 PM AEDT Alexey Kardashevskiy wrote:
> This step is to help removing the npu struct from pnv_phb so it
> can be used by pseries as well.
>
> Signed-off-by: Alexey Kardashevskiy <aik@xxxxxxxxx>
> Reviewed-by: David Gibson <david@xxxxxxxxxxxxxxxxxxxxx>
> ---
>  arch/powerpc/platforms/powernv/npu-dma.c | 22 ++++++++++++++++------
>  1 file changed, 16 insertions(+), 6 deletions(-)
>
> diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
> index 91d488f..9f48831 100644
> --- a/arch/powerpc/platforms/powernv/npu-dma.c
> +++ b/arch/powerpc/platforms/powernv/npu-dma.c
> @@ -327,6 +327,18 @@ struct pnv_ioda_pe *pnv_pci_npu_setup_iommu(struct pnv_ioda_pe *npe)
>  	return gpe;
>  }
>  
> +/*
> + * NPU2 ATS
> + */
> +static struct npu *npdev_to_npu(struct pci_dev *npdev)
> +{
> +	struct pnv_phb *nphb;
> +
> +	nphb = pci_bus_to_host(npdev->bus)->private_data;
> +
> +	return &nphb->npu;
> +}
> +
>  /* Maximum number of nvlinks per npu */
>  #define NV_MAX_LINKS 6
>  
> @@ -478,7 +490,6 @@ static void acquire_atsd_reg(struct npu_context *npu_context,
>  	int i, j;
>  	struct npu *npu;
>  	struct pci_dev *npdev;
> -	struct pnv_phb *nphb;
>  
>  	for (i = 0; i <= max_npu2_index; i++) {
>  		mmio_atsd_reg[i].reg = -1;
> @@ -493,8 +504,7 @@ static void acquire_atsd_reg(struct npu_context *npu_context,
>  		if (!npdev)
>  			continue;
>  
> -		nphb = pci_bus_to_host(npdev->bus)->private_data;
> -		npu = &nphb->npu;
> +		npu = npdev_to_npu(npdev);
>  		mmio_atsd_reg[i].npu = npu;
>  		mmio_atsd_reg[i].reg = get_mmio_atsd_reg(npu);
>  		while (mmio_atsd_reg[i].reg < 0) {
> @@ -690,7 +700,7 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
>  	}
>  
>  	nphb = pci_bus_to_host(npdev->bus)->private_data;
> -	npu = &nphb->npu;
> +	npu = npdev_to_npu(npdev);
>  
>  	/*
>  	 * Setup the NPU context table for a particular GPU. These need to be
> @@ -764,7 +774,7 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
>  	 */
>  	WRITE_ONCE(npu_context->npdev[npu->index][nvlink_index], npdev);
>  
> -	if (!nphb->npu.nmmu_flush) {
> +	if (!npu->nmmu_flush) {
>  		/*
>  		 * If we're not explicitly flushing ourselves we need to mark
>  		 * the thread for global flushes
> @@ -810,7 +820,7 @@ void pnv_npu2_destroy_context(struct npu_context *npu_context,
>  		return;
>  
>  	nphb = pci_bus_to_host(npdev->bus)->private_data;
> -	npu = &nphb->npu;
> +	npu = npdev_to_npu(npdev);
>  	nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0);
>  	if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
>  			&nvlink_index)))
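
For readers not familiar with the powernv code, here is a small, self-contained sketch of the indirection the patch introduces. Every type and helper below is a simplified stand-in for the real kernel ones (struct pci_dev, pci_bus_to_host(), struct pnv_phb, struct npu), not the actual definitions; it only models the idea that callers stop dereferencing pnv_phb themselves and instead go through an npdev_to_npu()-style accessor, so the npu struct can later be moved out of pnv_phb.

/*
 * Simplified, compilable model of the accessor pattern in this patch.
 * All structs and pci_bus_to_host() here are stand-ins, not the real
 * kernel definitions.
 */
#include <stdio.h>

struct npu { int nmmu_flush; };

struct pnv_phb {			/* stand-in for the PHB private data */
	struct npu npu;
};

struct pci_controller {		/* stand-in for the host bridge */
	void *private_data;
};

struct pci_bus { struct pci_controller *host; };
struct pci_dev { struct pci_bus *bus; };

/* stand-in for pci_bus_to_host() */
static struct pci_controller *pci_bus_to_host(struct pci_bus *bus)
{
	return bus->host;
}

/* The accessor: callers no longer reach into pnv_phb directly. */
static struct npu *npdev_to_npu(struct pci_dev *npdev)
{
	struct pnv_phb *nphb = pci_bus_to_host(npdev->bus)->private_data;

	return &nphb->npu;
}

int main(void)
{
	struct pnv_phb phb = { .npu = { .nmmu_flush = 1 } };
	struct pci_controller host = { .private_data = &phb };
	struct pci_bus bus = { .host = &host };
	struct pci_dev npdev = { .bus = &bus };

	/* Old style: npu = &nphb->npu;  new style: */
	struct npu *npu = npdev_to_npu(&npdev);

	printf("nmmu_flush = %d\n", npu->nmmu_flush);
	return 0;
}

With all lookups funneled through one helper, only npdev_to_npu() has to change when the npu struct stops living inside pnv_phb, which is the stated goal of this step.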