On Wed 11 Oct 14:29 PDT 2017, Chris Lew wrote:

> From: Chris Lew <clew@xxxxxxxxxxxxxx>
>
> SMEM V12 creates a global partition from which global smem items are
> allocated, instead of the global heap. The global partition has the
> same structure as a private partition.
>

Acked-by: Bjorn Andersson <bjorn.andersson@xxxxxxxxxx>

Regards,
Bjorn

> Signed-off-by: Chris Lew <clew@xxxxxxxxxxxxxx>
> ---
>
> Changes since v1:
> - Move V12 descriptions to top comment
> - Add cacheline support to global partition
> - Add ptable get helper function
> - Move global partition init to version check
>
> Changes since v2:
> - Return -ENOENT if partition table does not exist
> - Exclude -ENOENT error propagation from enumerate_partitions()
> - Reduce subject to 50 chars and wrap summary to 72 chars
>
>  drivers/soc/qcom/smem.c | 172 +++++++++++++++++++++++++++++++++++++++---------
>  1 file changed, 142 insertions(+), 30 deletions(-)
>
> diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
> index 540322ae409e..6a3134e9c591 100644
> --- a/drivers/soc/qcom/smem.c
> +++ b/drivers/soc/qcom/smem.c
> @@ -55,6 +55,10 @@
>   * is hence the region between the cached and non-cached offsets. The header of
>   * cached items comes after the data.
>   *
> + * Version 12 (SMEM_GLOBAL_PART_VERSION) changes the item alloc/get procedure
> + * for the global heap. A new global partition is created from the global heap
> + * region with partition type (SMEM_GLOBAL_HOST) and the max smem item count is
> + * set by the bootloader.
>   *
>   * To synchronize allocations in the shared memory heaps a remote spinlock must
>   * be held - currently lock number 3 of the sfpb or tcsr is used for this on all
> @@ -68,7 +72,8 @@
>   * version is a valid version as a sanity check.
>   */
>  #define SMEM_MASTER_SBL_VERSION_INDEX   7
> -#define SMEM_EXPECTED_VERSION           11
> +#define SMEM_GLOBAL_HEAP_VERSION        11
> +#define SMEM_GLOBAL_PART_VERSION        12
>
>  /*
>   * The first 8 items are only to be allocated by the boot loader while
> @@ -82,6 +87,9 @@
>  /* Processor/host identifier for the application processor */
>  #define SMEM_HOST_APPS          0
>
> +/* Processor/host identifier for the global partition */
> +#define SMEM_GLOBAL_HOST        0xfffe
> +
>  /* Max number of processors/hosts in a system */
>  #define SMEM_HOST_COUNT         9
>
> @@ -230,6 +238,8 @@ struct smem_region {
>   * struct qcom_smem - device data for the smem device
>   * @dev:        device pointer
>   * @hwlock:     reference to a hwspinlock
> + * @global_partition:   pointer to global partition when in use
> + * @global_cacheline:   cacheline size for global partition
>   * @partitions: list of pointers to partitions affecting the current
>   *              processor/host
>   * @cacheline:  list of cacheline sizes for each host
> @@ -241,6 +251,8 @@ struct qcom_smem {
>
>          struct hwspinlock *hwlock;
>
> +        struct smem_partition_header *global_partition;
> +        size_t global_cacheline;
>          struct smem_partition_header *partitions[SMEM_HOST_COUNT];
>          size_t cacheline[SMEM_HOST_COUNT];
>
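A note for readers following along: the global partition deliberately reuses
the private-partition layout, so everything below operates on structures that
already exist in smem.c. Abridged here for reference (these definitions come
from the driver as it stands, they are not added by this patch):

    struct smem_ptable_entry {
            __le32 offset;
            __le32 size;
            __le32 flags;
            __le16 host0;
            __le16 host1;
            __le32 cacheline;
            __le32 reserved[7];
    };

    struct smem_ptable {
            u8 magic[4];                    /* "$TOC" */
            __le32 version;
            __le32 num_entries;
            __le32 reserved[5];
            struct smem_ptable_entry entry[];
    };

    struct smem_partition_header {
            u8 magic[4];                    /* "$PRT" */
            __le16 host0;
            __le16 host1;
            __le32 size;
            __le32 offset_free_uncached;
            __le32 offset_free_cached;
            __le32 reserved[3];
    };

A v12 global partition is simply a partition whose table entry and header
carry host0 == host1 == SMEM_GLOBAL_HOST (0xfffe).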
> @@ -317,16 +329,14 @@ static void *cached_entry_to_item(struct smem_private_entry *e)
>  #define HWSPINLOCK_TIMEOUT      1000
>
>  static int qcom_smem_alloc_private(struct qcom_smem *smem,
> -                                   unsigned host,
> +                                   struct smem_partition_header *phdr,
>                                     unsigned item,
>                                     size_t size)
>  {
> -        struct smem_partition_header *phdr;
>          struct smem_private_entry *hdr, *end;
>          size_t alloc_size;
>          void *cached;
>
> -        phdr = smem->partitions[host];
>          hdr = phdr_to_first_uncached_entry(phdr);
>          end = phdr_to_last_uncached_entry(phdr);
>          cached = phdr_to_last_cached_entry(phdr);
> @@ -334,8 +344,8 @@ static int qcom_smem_alloc_private(struct qcom_smem *smem,
>          while (hdr < end) {
>                  if (hdr->canary != SMEM_PRIVATE_CANARY) {
>                          dev_err(smem->dev,
> -                                "Found invalid canary in host %d partition\n",
> -                                host);
> +                                "Found invalid canary in hosts %d:%d partition\n",
> +                                phdr->host0, phdr->host1);
>                          return -EINVAL;
>                  }
>
> @@ -373,8 +383,8 @@ static int qcom_smem_alloc_global(struct qcom_smem *smem,
>                                    unsigned item,
>                                    size_t size)
>  {
> -        struct smem_header *header;
>          struct smem_global_entry *entry;
> +        struct smem_header *header;
>
>          if (WARN_ON(item >= SMEM_ITEM_COUNT))
>                  return -EINVAL;
> @@ -416,6 +426,7 @@ static int qcom_smem_alloc_global(struct qcom_smem *smem,
>   */
>  int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
>  {
> +        struct smem_partition_header *phdr;
>          unsigned long flags;
>          int ret;
>
> @@ -434,10 +445,15 @@ int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
>          if (ret)
>                  return ret;
>
> -        if (host < SMEM_HOST_COUNT && __smem->partitions[host])
> -                ret = qcom_smem_alloc_private(__smem, host, item, size);
> -        else
> +        if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
> +                phdr = __smem->partitions[host];
> +                ret = qcom_smem_alloc_private(__smem, phdr, item, size);
> +        } else if (__smem->global_partition) {
> +                phdr = __smem->global_partition;
> +                ret = qcom_smem_alloc_private(__smem, phdr, item, size);
> +        } else {
>                  ret = qcom_smem_alloc_global(__smem, item, size);
> +        }
>
>          hwspin_unlock_irqrestore(__smem->hwlock, &flags);
>
> @@ -479,16 +495,12 @@ static void *qcom_smem_get_global(struct qcom_smem *smem,
>  }
>
>  static void *qcom_smem_get_private(struct qcom_smem *smem,
> -                                   unsigned host,
> +                                   struct smem_partition_header *phdr,
> +                                   size_t cacheline,
>                                     unsigned item,
>                                     size_t *size)
>  {
> -        struct smem_partition_header *phdr;
>          struct smem_private_entry *e, *end;
> -        size_t cacheline;
> -
> -        phdr = smem->partitions[host];
> -        cacheline = smem->cacheline[host];
>
>          e = phdr_to_first_uncached_entry(phdr);
>          end = phdr_to_last_uncached_entry(phdr);
> @@ -531,7 +543,8 @@ static void *qcom_smem_get_private(struct qcom_smem *smem,
>          return ERR_PTR(-ENOENT);
>
>  invalid_canary:
> -        dev_err(smem->dev, "Found invalid canary in host %d partition\n", host);
> +        dev_err(smem->dev, "Found invalid canary in hosts %d:%d partition\n",
> +                phdr->host0, phdr->host1);
>
>          return ERR_PTR(-EINVAL);
>  }
> @@ -547,7 +560,9 @@ static void *qcom_smem_get_private(struct qcom_smem *smem,
>   */
>  void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
>  {
> +        struct smem_partition_header *phdr;
>          unsigned long flags;
> +        size_t cacheln;
>          int ret;
>          void *ptr = ERR_PTR(-EPROBE_DEFER);
>
> @@ -560,10 +575,17 @@ void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
>          if (ret)
>                  return ERR_PTR(ret);
>
> -        if (host < SMEM_HOST_COUNT && __smem->partitions[host])
> -                ptr = qcom_smem_get_private(__smem, host, item, size);
> -        else
> +        if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
> +                phdr = __smem->partitions[host];
> +                cacheln = __smem->cacheline[host];
> +                ptr = qcom_smem_get_private(__smem, phdr, cacheln, item, size);
> +        } else if (__smem->global_partition) {
> +                phdr = __smem->global_partition;
> +                cacheln = __smem->global_cacheline;
> +                ptr = qcom_smem_get_private(__smem, phdr, cacheln, item, size);
> +        } else {
>                  ptr = qcom_smem_get_global(__smem, item, size);
> +        }
>
>          hwspin_unlock_irqrestore(__smem->hwlock, &flags);
>
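For context on the dispatch change above (a usage note, not part of the
patch): clients are unaffected by v12. Global items are still requested with
QCOM_SMEM_HOST_ANY, and the driver now transparently routes them into the
global partition when one was found at probe, falling back to the legacy
global heap otherwise. A rough sketch, where the item number and payload
struct are hypothetical:

    #include <linux/err.h>
    #include <linux/soc/qcom/smem.h>

    #define MY_SMEM_ITEM    437             /* hypothetical item number */

    struct my_state { __le32 flags; };      /* hypothetical payload */

    static int my_setup_shared_state(void)
    {
            struct my_state *state;
            size_t size;
            int ret;

            /* Allocate once; -EEXIST just means another host got there first. */
            ret = qcom_smem_alloc(QCOM_SMEM_HOST_ANY, MY_SMEM_ITEM,
                                  sizeof(*state));
            if (ret < 0 && ret != -EEXIST)
                    return ret;

            state = qcom_smem_get(QCOM_SMEM_HOST_ANY, MY_SMEM_ITEM, &size);
            if (IS_ERR(state))
                    return PTR_ERR(state);

            return 0;
    }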
> @@ -592,6 +614,10 @@ int qcom_smem_get_free_space(unsigned host)
>                  phdr = __smem->partitions[host];
>                  ret = le32_to_cpu(phdr->offset_free_cached) -
>                        le32_to_cpu(phdr->offset_free_uncached);
> +        } else if (__smem->global_partition) {
> +                phdr = __smem->global_partition;
> +                ret = le32_to_cpu(phdr->offset_free_cached) -
> +                      le32_to_cpu(phdr->offset_free_uncached);
>          } else {
>                  header = __smem->regions[0].virt_base;
>                  ret = le32_to_cpu(header->available);
> @@ -612,27 +638,106 @@ static int qcom_smem_get_sbl_version(struct qcom_smem *smem)
>          return le32_to_cpu(versions[SMEM_MASTER_SBL_VERSION_INDEX]);
>  }
>
> -static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
> -                                          unsigned local_host)
> +static struct smem_ptable *qcom_smem_get_ptable(struct qcom_smem *smem)
>  {
> -        struct smem_partition_header *header;
> -        struct smem_ptable_entry *entry;
>          struct smem_ptable *ptable;
> -        unsigned remote_host;
> -        u32 version, host0, host1;
> -        int i;
> +        u32 version;
>
>          ptable = smem->regions[0].virt_base + smem->regions[0].size - SZ_4K;
>          if (memcmp(ptable->magic, SMEM_PTABLE_MAGIC, sizeof(ptable->magic)))
> -                return 0;
> +                return ERR_PTR(-ENOENT);
>
>          version = le32_to_cpu(ptable->version);
>          if (version != 1) {
>                  dev_err(smem->dev,
>                          "Unsupported partition header version %d\n", version);
> +                return ERR_PTR(-EINVAL);
> +        }
> +        return ptable;
> +}
> +
> +static int qcom_smem_set_global_partition(struct qcom_smem *smem)
> +{
> +        struct smem_partition_header *header;
> +        struct smem_ptable_entry *entry = NULL;
> +        struct smem_ptable *ptable;
> +        u32 host0, host1, size;
> +        int i;
> +
> +        ptable = qcom_smem_get_ptable(smem);
> +        if (IS_ERR(ptable))
> +                return PTR_ERR(ptable);
> +
> +        for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
> +                host0 = le16_to_cpu(ptable->entry[i].host0);
> +                host1 = le16_to_cpu(ptable->entry[i].host1);
> +                if (host0 == SMEM_GLOBAL_HOST && host0 == host1) {
> +                        entry = &ptable->entry[i];
> +                        break;
> +                }
> +        }
> +
> +        if (!entry) {
> +                dev_err(smem->dev, "Missing entry for global partition\n");
> +                return -EINVAL;
> +        }
> +
> +        if (!le32_to_cpu(entry->offset) || !le32_to_cpu(entry->size)) {
> +                dev_err(smem->dev, "Invalid entry for global partition\n");
> +                return -EINVAL;
> +        }
> +
> +        if (smem->global_partition) {
> +                dev_err(smem->dev, "Already found the global partition\n");
> +                return -EINVAL;
> +        }
> +
> +        header = smem->regions[0].virt_base + le32_to_cpu(entry->offset);
> +        host0 = le16_to_cpu(header->host0);
> +        host1 = le16_to_cpu(header->host1);
> +
> +        if (memcmp(header->magic, SMEM_PART_MAGIC, sizeof(header->magic))) {
> +                dev_err(smem->dev, "Global partition has invalid magic\n");
> +                return -EINVAL;
> +        }
> +
> +        if (host0 != SMEM_GLOBAL_HOST && host1 != SMEM_GLOBAL_HOST) {
> +                dev_err(smem->dev, "Global partition hosts are invalid\n");
> +                return -EINVAL;
> +        }
> +
> +        if (le32_to_cpu(header->size) != le32_to_cpu(entry->size)) {
> +                dev_err(smem->dev, "Global partition has invalid size\n");
>                  return -EINVAL;
>          }
>
> +        size = le32_to_cpu(header->offset_free_uncached);
> +        if (size > le32_to_cpu(header->size)) {
> +                dev_err(smem->dev,
> +                        "Global partition has invalid free pointer\n");
> +                return -EINVAL;
> +        }
> +
> +        smem->global_partition = header;
> +        smem->global_cacheline = le32_to_cpu(entry->cacheline);
> +
> +        return 0;
> +}
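An aside on the free-pointer sanity check above: within a partition, uncached
entries grow upward from just after the header while cached entries grow
downward from the end, so a well-formed header always satisfies
sizeof(header) <= offset_free_uncached <= offset_free_cached <= size. As an
illustration (values and initialization are hypothetical, mirroring what a
bootloader might seed for a fresh 1 MiB global partition):

    /* Hypothetical freshly-initialized global partition header. */
    struct smem_partition_header hdr = {
            .magic = { 0x24, 0x50, 0x52, 0x54 },    /* "$PRT" */
            .host0 = cpu_to_le16(SMEM_GLOBAL_HOST),
            .host1 = cpu_to_le16(SMEM_GLOBAL_HOST),
            .size  = cpu_to_le32(SZ_1M),
            /* no items yet: both frontiers sit at the partition edges */
            .offset_free_uncached = cpu_to_le32(sizeof(hdr)),
            .offset_free_cached   = cpu_to_le32(SZ_1M),
    };

qcom_smem_get_free_space() on such a partition would report
SZ_1M - sizeof(hdr) bytes, i.e. offset_free_cached - offset_free_uncached,
exactly the expression added to that function above.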
> +
> +static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
> +                                          unsigned int local_host)
> +{
> +        struct smem_partition_header *header;
> +        struct smem_ptable_entry *entry;
> +        struct smem_ptable *ptable;
> +        unsigned int remote_host;
> +        u32 host0, host1;
> +        int i;
> +
> +        ptable = qcom_smem_get_ptable(smem);
> +        if (IS_ERR(ptable))
> +                return PTR_ERR(ptable);
> +
>          for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
>                  entry = &ptable->entry[i];
>                  host0 = le16_to_cpu(entry->host0);
> @@ -773,13 +878,20 @@ static int qcom_smem_probe(struct platform_device *pdev)
>          }
>
>          version = qcom_smem_get_sbl_version(smem);
> -        if (version >> 16 != SMEM_EXPECTED_VERSION) {
> +        switch (version >> 16) {
> +        case SMEM_GLOBAL_PART_VERSION:
> +                ret = qcom_smem_set_global_partition(smem);
> +                if (ret < 0)
> +                        return ret;
> +        case SMEM_GLOBAL_HEAP_VERSION:
> +                break;
> +        default:
>                  dev_err(&pdev->dev, "Unsupported SMEM version 0x%x\n", version);
>                  return -EINVAL;
>          }
>
>          ret = qcom_smem_enumerate_partitions(smem, SMEM_HOST_APPS);
> -        if (ret < 0)
> +        if (ret < 0 && ret != -ENOENT)
>                  return ret;
>
>          hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
> --
> The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
> a Linux Foundation Collaborative Project
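One closing remark on the probe hunk: the absence of a break after the
SMEM_GLOBAL_PART_VERSION case is intentional. A v12 system first sets up the
global partition and then falls through to the common v11 path, since
private-partition enumeration is shared between the two versions (which is
also why -ENOENT from a missing partition table is tolerated for
enumerate_partitions() but not for the global partition lookup). Condensed
into a sketch (not the literal driver code), the partition selection that
qcom_smem_alloc() and qcom_smem_get() now share looks like:

    /* Sketch only: NULL means "fall back to the v11 global heap". */
    static struct smem_partition_header *
    smem_select_partition(struct qcom_smem *smem, unsigned host)
    {
            if (host < SMEM_HOST_COUNT && smem->partitions[host])
                    return smem->partitions[host];  /* private partition */
            if (smem->global_partition)
                    return smem->global_partition;  /* v12 global partition */
            return NULL;                            /* v11 global heap */
    }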