Re: [PATCH v2 05/15] media: intel/ipu6: add IPU6 DMA mapping API and MMU table

Hi,

On Tue, 2023-10-24 at 19:29 +0800, bingbu.cao@xxxxxxxxx wrote:
> From: Bingbu Cao <bingbu.cao@xxxxxxxxx>
> 
> The Intel IPU6 has an internal microcontroller (scalar processor, SP)
> which is used to execute the firmware. The SP can access IPU internal
> memory and map system DRAM to its internal 32-bit virtual address
> space.
> 
> This patch adds a driver for the IPU MMU and a DMA mapping
> implementation using the internal MMU. The system IOMMU may be used
> besides the IPU MMU.
> 
> Signed-off-by: Bingbu Cao <bingbu.cao@xxxxxxxxx>
> ---
>  drivers/media/pci/intel/ipu6/ipu6-dma.c | 491 ++++++++++++++
>  drivers/media/pci/intel/ipu6/ipu6-dma.h |  20 +
>  drivers/media/pci/intel/ipu6/ipu6-mmu.c | 828 ++++++++++++++++++++++++
>  drivers/media/pci/intel/ipu6/ipu6-mmu.h |  67 ++
>  4 files changed, 1406 insertions(+)
>  create mode 100644 drivers/media/pci/intel/ipu6/ipu6-dma.c
>  create mode 100644 drivers/media/pci/intel/ipu6/ipu6-dma.h
>  create mode 100644 drivers/media/pci/intel/ipu6/ipu6-mmu.c
>  create mode 100644 drivers/media/pci/intel/ipu6/ipu6-mmu.h
...

> +++ b/drivers/media/pci/intel/ipu6/ipu6-mmu.c
...
> +static struct ipu6_mmu_info *ipu6_mmu_alloc(struct ipu6_device *isp)
> +{
> +       struct ipu6_mmu_info *mmu_info;
> +       int ret;
> +
> +       mmu_info = kzalloc(sizeof(*mmu_info), GFP_KERNEL);
> +       if (!mmu_info)
> +               return NULL;
> +
> +       mmu_info->aperture_start = 0;
> +       mmu_info->aperture_end = DMA_BIT_MASK(isp->secure_mode ?
> +                                             IPU6_MMU_ADDR_BITS :
> +                                             IPU6_MMU_ADDR_BITS_NON_SECURE);
> +       mmu_info->pgsize_bitmap = SZ_4K;
> +       mmu_info->dev = &isp->pdev->dev;
> +
> +       ret = get_dummy_page(mmu_info);
> +       if (ret)
> +               goto err_free_info;
> +
> +       ret = alloc_dummy_l2_pt(mmu_info);
> +       if (ret)
> +               goto err_free_dummy_page;
> +
> +       mmu_info->l2_pts = vzalloc(ISP_L2PT_PTES * sizeof(*mmu_info->l2_pts));
> +       if (!mmu_info->l2_pts)
> +               goto err_free_dummy_l2_pt;
> +
> +       /*
> +        * We always map the L1 page table (a single page) as well as
> +        * the L2 page tables.
> +        */
> +       mmu_info->l1_pt = alloc_l1_pt(mmu_info);
> +       if (!mmu_info->l1_pt)
> +               goto err_free_l2_pts;
> +
> +       spin_lock_init(&mmu_info->lock);
> +
> +       dev_dbg(mmu_info->dev, "domain initialised\n");
> +
> +       return mmu_info;
> +
> +err_free_l2_pts:
> +       vfree(mmu_info->l2_pts);
> +err_free_dummy_l2_pt:
> +       free_dummy_l2_pt(mmu_info);
> +err_free_dummy_page:
> +       free_dummy_page(mmu_info);
> +err_free_info:
> +       kfree(mmu_info);
> +
> +       return NULL;
> +}

...


> +static void ipu6_mmu_destroy(struct ipu6_mmu *mmu)
> +{
> +       struct ipu6_dma_mapping *dmap = mmu->dmap;
> +       struct ipu6_mmu_info *mmu_info = dmap->mmu_info;
> +       struct iova *iova;
> +       u32 l1_idx;
> +
> +       if (mmu->iova_trash_page) {
> +               iova = find_iova(&dmap->iovad, PHYS_PFN(mmu->iova_trash_page));
> +               if (iova) {
> +                       /* unmap and free the trash buffer iova */
> +                       ipu6_mmu_unmap(mmu_info, PFN_PHYS(iova->pfn_lo),
> +                                      PFN_PHYS(iova_size(iova)));
> +                       __free_iova(&dmap->iovad, iova);
> +               } else {
> +                       dev_err(mmu->dev, "trash buffer iova not found.\n");
> +               }
> +
> +               mmu->iova_trash_page = 0;
> +               dma_unmap_page(mmu_info->dev, mmu->pci_trash_page,
> +                              PAGE_SIZE, DMA_BIDIRECTIONAL);
> +               mmu->pci_trash_page = 0;
> +               __free_page(mmu->trash_page);
> +       }
> +
> +       for (l1_idx = 0; l1_idx < ISP_L1PT_PTES; l1_idx++) {
> +               if (mmu_info->l1_pt[l1_idx] != mmu_info->dummy_l2_pteval) {
> +                       dma_unmap_single(mmu_info->dev,
> +                                        TBL_PHYS_ADDR(mmu_info->l1_pt[l1_idx]),
> +                                        PAGE_SIZE, DMA_BIDIRECTIONAL);
> +                       free_page((unsigned long)mmu_info->l2_pts[l1_idx]);
> +               }
> +       }
> +
> +       free_dummy_page(mmu_info);
> +       dma_unmap_single(mmu_info->dev, TBL_PHYS_ADDR(mmu_info->l1_pt_dma),
> +                        PAGE_SIZE, DMA_BIDIRECTIONAL);
> +       free_page((unsigned long)mmu_info->dummy_l2_pt);
> +       free_page((unsigned long)mmu_info->l1_pt);
> +       kfree(mmu_info);
> +}

mmu_info->l2_pts is leaked here. It is allocated in ipu6_mmu_alloc() and
freed in that function's error path along with the other resources you
free here, but it is never freed on this normal teardown path.
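
An untested sketch of a fix, assuming nothing else still references the
array at that point, would be to vfree() it just before mmu_info itself
is released at the end of ipu6_mmu_destroy():

	free_dummy_page(mmu_info);
	dma_unmap_single(mmu_info->dev, TBL_PHYS_ADDR(mmu_info->l1_pt_dma),
			 PAGE_SIZE, DMA_BIDIRECTIONAL);
	free_page((unsigned long)mmu_info->dummy_l2_pt);
	free_page((unsigned long)mmu_info->l1_pt);
	vfree(mmu_info->l2_pts);	/* pointer array allocated in ipu6_mmu_alloc() */
	kfree(mmu_info);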

/Andreas



