Use the shared-memory-pool mechanism for frame queue descriptor and
packed frame descriptor record area allocations.

Signed-off-by: Roy Pledge <roy.pledge@xxxxxxx>
---
 drivers/soc/fsl/qbman/qman_ccsr.c | 119 +++++++++++++++++++++-----------------
 drivers/soc/fsl/qbman/qman_priv.h |   4 +-
 drivers/soc/fsl/qbman/qman_test.h |   2 -
 3 files changed, 68 insertions(+), 57 deletions(-)

diff --git a/drivers/soc/fsl/qbman/qman_ccsr.c b/drivers/soc/fsl/qbman/qman_ccsr.c
index 90bc40c..35c59ca 100644
--- a/drivers/soc/fsl/qbman/qman_ccsr.c
+++ b/drivers/soc/fsl/qbman/qman_ccsr.c
@@ -400,63 +400,19 @@ static int qm_init_pfdr(struct device *dev, u32 pfdr_start, u32 num)
 	return -ENODEV;
 }
 
-/*
- * Ideally we would use the DMA API to turn rmem->base into a DMA address
- * (especially if iommu translations ever get involved). Unfortunately, the
- * DMA API currently does not allow mapping anything that is not backed with
- * a struct page.
- */
+/* QMan needs two global memory areas initialized at boot time:
+     1) FQD: Frame Queue Descriptors used to manage frame queues
+     2) PFDR: Packed Frame Queue Descriptor Records used to store frames
+   Both areas are reserved using the device tree reserved memory framework
+   and the addresses and sizes are initialized when the QMan device is probed. */
 static dma_addr_t fqd_a, pfdr_a;
 static size_t fqd_sz, pfdr_sz;
 
-static int qman_fqd(struct reserved_mem *rmem)
-{
-	fqd_a = rmem->base;
-	fqd_sz = rmem->size;
-
-	WARN_ON(!(fqd_a && fqd_sz));
-
-	return 0;
-}
-RESERVEDMEM_OF_DECLARE(qman_fqd, "fsl,qman-fqd", qman_fqd);
-
-static int qman_pfdr(struct reserved_mem *rmem)
-{
-	pfdr_a = rmem->base;
-	pfdr_sz = rmem->size;
-
-	WARN_ON(!(pfdr_a && pfdr_sz));
-
-	return 0;
-}
-RESERVEDMEM_OF_DECLARE(qman_pfdr, "fsl,qman-pfdr", qman_pfdr);
-
 static unsigned int qm_get_fqid_maxcnt(void)
 {
 	return fqd_sz / 64;
 }
 
-/*
- * Flush this memory range from data cache so that QMAN originated
- * transactions for this memory region could be marked non-coherent.
- */
-static int zero_priv_mem(struct device *dev, struct device_node *node,
-			 phys_addr_t addr, size_t sz)
-{
-	/* map as cacheable, non-guarded */
-	void __iomem *tmpp = ioremap_prot(addr, sz, 0);
-
-	if (!tmpp)
-		return -ENOMEM;
-
-	memset_io(tmpp, 0, sz);
-	flush_dcache_range((unsigned long)tmpp,
-			   (unsigned long)tmpp + sz);
-	iounmap(tmpp);
-
-	return 0;
-}
-
 static void log_edata_bits(struct device *dev, u32 bit_count)
 {
 	u32 i, j, mask = 0xffffffff;
@@ -687,11 +643,12 @@ static int qman_resource_init(struct device *dev)
 static int fsl_qman_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
-	struct device_node *node = dev->of_node;
+	struct device_node *mem_node, *node = dev->of_node;
 	struct resource *res;
 	int ret, err_irq;
 	u16 id;
 	u8 major, minor;
+	u64 size;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!res) {
@@ -727,10 +684,66 @@ static int fsl_qman_probe(struct platform_device *pdev)
 		qm_channel_caam = QMAN_CHANNEL_CAAM_REV3;
 	}
 
-	ret = zero_priv_mem(dev, node, fqd_a, fqd_sz);
-	WARN_ON(ret);
-	if (ret)
+	/* The order of the memory regions is assumed to be FQD followed by
+	   PFDR; to ensure allocations come from the correct region, the
+	   driver initializes and then allocates each area in that order. */
+
+	ret = of_reserved_mem_device_init_by_idx(dev, dev->of_node, 0);
+	if (ret) {
+		dev_err(dev, "of_reserved_mem_device_init_by_idx(0) failed 0x%x\n",
+			ret);
+		return -ENODEV;
+	}
+	mem_node = of_parse_phandle(dev->of_node, "memory-region", 0);
+	if (mem_node) {
+		ret = of_property_read_u64(mem_node, "size", &size);
+		if (ret) {
+			dev_err(dev, "FQD: of_property_read_u64() failed 0x%x\n", ret);
+			return -ENODEV;
+		}
+		fqd_sz = size;
+	} else {
+		dev_err(dev, "No memory-region found for FQD\n");
+		return -ENODEV;
+	}
+	if (!dma_zalloc_coherent(dev, fqd_sz, &fqd_a, 0)) {
+		dev_err(dev, "Alloc FQD memory failed\n");
+		return -ENODEV;
+	}
+
+	dev_info(dev, "Allocated FQD 0x%llx 0x%zx\n", fqd_a, fqd_sz);
+
+	/* Disassociate the FQD reserved memory area from the device because
+	   a device can only have one DMA memory area. This should be fine
+	   since the memory is allocated and initialized and only ever accessed
+	   by the QMan device from now on. */
+	of_reserved_mem_device_release(dev);
+
+	/* Setup PFDR memory */
+	ret = of_reserved_mem_device_init_by_idx(dev, dev->of_node, 1);
+	if (ret) {
+		dev_err(dev, "of_reserved_mem_device_init_by_idx(1) failed 0x%x\n",
+			ret);
+		return -ENODEV;
+	}
+	mem_node = of_parse_phandle(dev->of_node, "memory-region", 1);
+	if (mem_node) {
+		ret = of_property_read_u64(mem_node, "size", &size);
+		if (ret) {
+			dev_err(dev, "PFDR: of_property_read_u64() failed 0x%x\n", ret);
+			return -ENODEV;
+		}
+		pfdr_sz = size;
+	} else {
+		dev_err(dev, "No memory-region found for PFDR\n");
+		return -ENODEV;
+	}
+	if (!dma_zalloc_coherent(dev, pfdr_sz, &pfdr_a, 0)) {
+		dev_err(dev, "Alloc PFDR failed, size 0x%zx\n", pfdr_sz);
 		return -ENODEV;
+	}
+
+	dev_info(dev, "Allocated PFDR 0x%llx 0x%zx\n", pfdr_a, pfdr_sz);
 
 	ret = qman_init_ccsr(dev);
 	if (ret) {
diff --git a/drivers/soc/fsl/qbman/qman_priv.h b/drivers/soc/fsl/qbman/qman_priv.h
index 22725bd..1e998ea5 100644
--- a/drivers/soc/fsl/qbman/qman_priv.h
+++ b/drivers/soc/fsl/qbman/qman_priv.h
@@ -28,12 +28,12 @@
  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
 #include "dpaa_sys.h"
 
 #include <soc/fsl/qman.h>
 #include <linux/iommu.h>
+#include <linux/dma-contiguous.h>
+#include <linux/of_address.h>
 
 #if defined(CONFIG_FSL_PAMU)
 #include <asm/fsl_pamu_stash.h>
diff --git a/drivers/soc/fsl/qbman/qman_test.h b/drivers/soc/fsl/qbman/qman_test.h
index d5f8cb2..41bdbc48 100644
--- a/drivers/soc/fsl/qbman/qman_test.h
+++ b/drivers/soc/fsl/qbman/qman_test.h
@@ -30,7 +30,5 @@
 
 #include "qman_priv.h"
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
 int qman_test_stash(void);
 int qman_test_api(void);
-- 
2.9.3
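
For reference, the reworked probe path above expects the FQD and PFDR
carveouts to appear at "memory-region" indices 0 and 1 and reads each
region's size from its "size" property before allocating it with
dma_zalloc_coherent(). A minimal device tree sketch of that layout is
shown below; the node names, sizes, alignments, unit address and the
"shared-dma-pool" compatible are illustrative assumptions, not values
defined by this patch:

	reserved-memory {
		#address-cells = <2>;
		#size-cells = <2>;
		ranges;

		qman_fqd: qman-fqd {			/* index 0: FQD */
			compatible = "shared-dma-pool";
			size = <0 0x400000>;		/* example size only */
			alignment = <0 0x400000>;
			no-map;
		};

		qman_pfdr: qman-pfdr {			/* index 1: PFDR */
			compatible = "shared-dma-pool";
			size = <0 0x2000000>;		/* example size only */
			alignment = <0 0x2000000>;
			no-map;
		};
	};

	qman: qman@1880000 {
		...
		memory-region = <&qman_fqd &qman_pfdr>;
	};

Because dma_zalloc_coherent() returns zeroed memory from the per-device
pool set up by of_reserved_mem_device_init_by_idx(), the explicit
zero_priv_mem() flush and the RESERVEDMEM_OF_DECLARE() early hooks are
no longer needed.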