SMEM V12 creates a global partition to allocate global smem items from
instead of the global heap. The global partition has the same structure
as a private partition.

Signed-off-by: Chris Lew <clew@xxxxxxxxxxxxxx>
---
 drivers/soc/qcom/smem.c | 134 +++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 105 insertions(+), 29 deletions(-)

diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
index c28275be0038..fed2934d6bda 100644
--- a/drivers/soc/qcom/smem.c
+++ b/drivers/soc/qcom/smem.c
@@ -65,11 +65,20 @@ /*
  * Item 3 of the global heap contains an array of versions for the various
  * software components in the SoC. We verify that the boot loader version is
- * what the expected version (SMEM_EXPECTED_VERSION) as a sanity check.
+ * a valid version as a sanity check.
  */
 #define SMEM_ITEM_VERSION              3
 #define SMEM_MASTER_SBL_VERSION_INDEX  7
-#define SMEM_EXPECTED_VERSION          11
+#define SMEM_GLOBAL_HEAP_VERSION       11
+
+/*
+ * Version 12 (SMEM_GLOBAL_PART_VERSION) changes the item alloc/get procedure
+ * for the global heap. A new global partition is created from the global heap
+ * region with partition type (SMEM_GLOBAL_HOST) and the max smem item count is
+ * set by the bootloader.
+ */
+#define SMEM_GLOBAL_PART_VERSION       12
+#define SMEM_GLOBAL_HOST               0xfffe
 
 /*
  * The first 8 items are only to be allocated by the boot loader while
@@ -231,6 +240,8 @@ struct smem_region {
  * struct qcom_smem - device data for the smem device
  * @dev:	device pointer
  * @hwlock:	reference to a hwspinlock
+ * @global_partition:	pointer to global partition when in use
+ * @global_cacheline:	cacheline size for global partition
  * @partitions:	list of pointers to partitions affecting the current
  *		processor/host
  * @cacheline:	list of cacheline sizes for each host
@@ -242,6 +253,8 @@ struct qcom_smem {
 
 	struct hwspinlock *hwlock;
 
+	struct smem_partition_header *global_partition;
+	size_t global_cacheline;
 	struct smem_partition_header *partitions[SMEM_HOST_COUNT];
 	size_t cacheline[SMEM_HOST_COUNT];
 
@@ -318,16 +331,14 @@ static void *cached_entry_to_item(struct smem_private_entry *e)
 #define HWSPINLOCK_TIMEOUT	1000
 
 static int qcom_smem_alloc_private(struct qcom_smem *smem,
-				   unsigned host,
+				   struct smem_partition_header *phdr,
 				   unsigned item,
 				   size_t size)
 {
-	struct smem_partition_header *phdr;
 	struct smem_private_entry *hdr, *end;
 	size_t alloc_size;
 	void *cached;
 
-	phdr = smem->partitions[host];
 	hdr = phdr_to_first_uncached_entry(phdr);
 	end = phdr_to_last_uncached_entry(phdr);
 	cached = phdr_to_last_cached_entry(phdr);
@@ -335,8 +346,8 @@ static int qcom_smem_alloc_private(struct qcom_smem *smem,
 	while (hdr < end) {
 		if (hdr->canary != SMEM_PRIVATE_CANARY) {
 			dev_err(smem->dev,
-				"Found invalid canary in host %d partition\n",
-				host);
+				"Found invalid canary in hosts %d:%d partition\n",
+				phdr->host0, phdr->host1);
 			return -EINVAL;
 		}
 
@@ -419,6 +430,7 @@ int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
 {
 	unsigned long flags;
 	int ret;
+	struct smem_partition_header *phdr;
 
 	if (!__smem)
 		return -EPROBE_DEFER;
@@ -435,10 +447,15 @@ int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
 	if (ret)
 		return ret;
 
-	if (host < SMEM_HOST_COUNT && __smem->partitions[host])
-		ret = qcom_smem_alloc_private(__smem, host, item, size);
-	else
+	if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
+		phdr = __smem->partitions[host];
+		ret = qcom_smem_alloc_private(__smem, phdr, item, size);
+	} else if (__smem->global_partition) {
+		phdr = __smem->global_partition;
+		ret = qcom_smem_alloc_private(__smem, phdr, item, size);
+	} else {
 		ret = qcom_smem_alloc_global(__smem, item, size);
+	}
 
 	hwspin_unlock_irqrestore(__smem->hwlock, &flags);
 
@@ -480,16 +497,12 @@ static void *qcom_smem_get_global(struct qcom_smem *smem,
 }
 
 static void *qcom_smem_get_private(struct qcom_smem *smem,
-				   unsigned host,
+				   struct smem_partition_header *phdr,
+				   size_t cacheline,
 				   unsigned item,
 				   size_t *size)
 {
-	struct smem_partition_header *phdr;
 	struct smem_private_entry *e, *end;
-	size_t cacheline;
-
-	phdr = smem->partitions[host];
-	cacheline = smem->cacheline[host];
 
 	e = phdr_to_first_uncached_entry(phdr);
 	end = phdr_to_last_uncached_entry(phdr);
@@ -532,7 +545,8 @@ static void *qcom_smem_get_private(struct qcom_smem *smem,
 	return ERR_PTR(-ENOENT);
 
 invalid_canary:
-	dev_err(smem->dev, "Found invalid canary in host %d partition\n", host);
+	dev_err(smem->dev, "Found invalid canary in hosts %d:%d partition\n",
+		phdr->host0, phdr->host1);
 	return ERR_PTR(-EINVAL);
 }
 
@@ -551,6 +565,8 @@ void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
 	unsigned long flags;
 	int ret;
 	void *ptr = ERR_PTR(-EPROBE_DEFER);
+	struct smem_partition_header *phdr;
+	size_t cacheln;
 
 	if (!__smem)
 		return ptr;
@@ -561,10 +577,17 @@ void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
 	if (ret)
 		return ERR_PTR(ret);
 
-	if (host < SMEM_HOST_COUNT && __smem->partitions[host])
-		ptr = qcom_smem_get_private(__smem, host, item, size);
-	else
+	if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
+		phdr = __smem->partitions[host];
+		cacheln = __smem->cacheline[host];
+		ptr = qcom_smem_get_private(__smem, phdr, cacheln, item, size);
+	} else if (__smem->global_partition) {
+		phdr = __smem->global_partition;
+		cacheln = __smem->global_cacheline;
+		ptr = qcom_smem_get_private(__smem, phdr, cacheln, item, size);
+	} else {
 		ptr = qcom_smem_get_global(__smem, item, size);
+	}
 
 	hwspin_unlock_irqrestore(__smem->hwlock, &flags);
 
@@ -593,6 +616,10 @@ int qcom_smem_get_free_space(unsigned host)
 		phdr = __smem->partitions[host];
 		ret = le32_to_cpu(phdr->offset_free_cached) -
 		      le32_to_cpu(phdr->offset_free_uncached);
+	} else if (__smem->global_partition) {
+		phdr = __smem->global_partition;
+		ret = le32_to_cpu(phdr->offset_free_cached) -
+		      le32_to_cpu(phdr->offset_free_uncached);
 	} else {
 		header = __smem->regions[0].virt_base;
 		ret = le32_to_cpu(header->available);
@@ -604,21 +631,61 @@ int qcom_smem_get_free_space(unsigned host)
 
 static int qcom_smem_get_sbl_version(struct qcom_smem *smem)
 {
+	struct smem_header *header;
 	__le32 *versions;
-	size_t size;
 
-	versions = qcom_smem_get_global(smem, SMEM_ITEM_VERSION, &size);
-	if (IS_ERR(versions)) {
-		dev_err(smem->dev, "Unable to read the version item\n");
-		return -ENOENT;
+	header = smem->regions[0].virt_base;
+	versions = header->version;
+
+	return le32_to_cpu(versions[SMEM_MASTER_SBL_VERSION_INDEX]);
+}
+
+static int qcom_smem_set_global_partition(struct qcom_smem *smem,
+					  struct smem_ptable_entry *entry)
+{
+	struct smem_partition_header *header;
+	u32 host0, host1;
+
+	if (!le32_to_cpu(entry->offset) || !le32_to_cpu(entry->size)) {
+		dev_err(smem->dev, "Invalid entry for global partition\n");
+		return -EINVAL;
 	}
 
-	if (size < sizeof(unsigned) * SMEM_MASTER_SBL_VERSION_INDEX) {
-		dev_err(smem->dev, "Version item is too small\n");
+	if (smem->global_partition) {
+		dev_err(smem->dev, "Already found the global partition\n");
 		return -EINVAL;
 	}
+
+	header = smem->regions[0].virt_base + le32_to_cpu(entry->offset);
+	host0 = le16_to_cpu(header->host0);
+	host1 = le16_to_cpu(header->host1);
 
-	return le32_to_cpu(versions[SMEM_MASTER_SBL_VERSION_INDEX]);
+	if (memcmp(header->magic, SMEM_PART_MAGIC,
+		   sizeof(header->magic))) {
+		dev_err(smem->dev, "Global partition has invalid magic\n");
+		return -EINVAL;
+	}
+
+	if (host0 != SMEM_GLOBAL_HOST && host1 != SMEM_GLOBAL_HOST) {
+		dev_err(smem->dev, "Global partition hosts are invalid\n");
+		return -EINVAL;
+	}
+
+	if (header->size != entry->size) {
+		dev_err(smem->dev, "Global partition has invalid size\n");
+		return -EINVAL;
+	}
+
+	if (le32_to_cpu(header->offset_free_uncached) >
+	    le32_to_cpu(header->size)) {
+		dev_err(smem->dev,
+			"Global partition has invalid free pointer\n");
+		return -EINVAL;
+	}
+
+	smem->global_partition = header;
+	smem->global_cacheline = le32_to_cpu(entry->cacheline);
+
+	return 0;
 }
 
@@ -647,6 +714,12 @@ static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
 		host0 = le16_to_cpu(entry->host0);
 		host1 = le16_to_cpu(entry->host1);
 
+		if (host0 == SMEM_GLOBAL_HOST && host0 == host1) {
+			if (qcom_smem_set_global_partition(smem, entry))
+				return -EINVAL;
+			continue;
+		}
+
 		if (host0 != local_host && host1 != local_host)
 			continue;
 
@@ -782,7 +855,11 @@ static int qcom_smem_probe(struct platform_device *pdev)
 	}
 
 	version = qcom_smem_get_sbl_version(smem);
-	if (version >> 16 != SMEM_EXPECTED_VERSION) {
+	switch (version >> 16) {
+	case SMEM_GLOBAL_PART_VERSION:
+	case SMEM_GLOBAL_HEAP_VERSION:
+		break;
+	default:
 		dev_err(&pdev->dev, "Unsupported SMEM version 0x%x\n", version);
 		return -EINVAL;
 	}

-- 
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
a Linux Foundation Collaborative Project
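
For reference, below is a minimal consumer-side sketch, not part of this
patch: the item number and the example function are made up for
illustration, while qcom_smem_alloc()/qcom_smem_get() and
QCOM_SMEM_HOST_ANY come from the existing <linux/soc/qcom/smem.h> header.
It shows that the calling convention is unchanged; with a V12 bootloader
the same calls are simply served from the global partition instead of the
global heap.

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/soc/qcom/smem.h>

/* Hypothetical item number, for illustration only. */
#define EXAMPLE_SMEM_ITEM	499

static int example_smem_client(void)
{
	size_t size;
	u32 *value;
	int ret;

	/*
	 * Reserve space for the item. QCOM_SMEM_HOST_ANY has no private
	 * partition, so on V12 the allocation lands in the global
	 * partition; -EEXIST only means the item already exists.
	 */
	ret = qcom_smem_alloc(QCOM_SMEM_HOST_ANY, EXAMPLE_SMEM_ITEM,
			      sizeof(*value));
	if (ret < 0 && ret != -EEXIST)
		return ret;

	/* Look the item back up; size is set to the allocated length. */
	value = qcom_smem_get(QCOM_SMEM_HOST_ANY, EXAMPLE_SMEM_ITEM, &size);
	if (IS_ERR(value))
		return PTR_ERR(value);

	*value = 0x12345678;	/* example payload */

	return 0;
}

The dispatch happens inside qcom_smem_alloc()/qcom_smem_get(): hosts
without a private partition fall through to the global partition when one
was found at probe time, and to the legacy global heap otherwise.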