Enable runtime PM in the mtd driver to notify the graphics driver that
the whole card should be kept awake while nvm operations are performed
through this driver.

CC: Lucas De Marchi <lucas.demarchi@xxxxxxxxx>
Signed-off-by: Alexander Usyskin <alexander.usyskin@xxxxxxxxx>
---
 drivers/mtd/devices/mtd-intel-dg.c | 73 +++++++++++++++++++++++++-----
 1 file changed, 61 insertions(+), 12 deletions(-)

diff --git a/drivers/mtd/devices/mtd-intel-dg.c b/drivers/mtd/devices/mtd-intel-dg.c
index 230bf444b7fe..05a2f87b722a 100644
--- a/drivers/mtd/devices/mtd-intel-dg.c
+++ b/drivers/mtd/devices/mtd-intel-dg.c
@@ -15,11 +15,14 @@
 #include <linux/module.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
+#include <linux/pm_runtime.h>
 #include <linux/string.h>
 #include <linux/slab.h>
 #include <linux/sizes.h>
 #include <linux/types.h>
 
+#define INTEL_DG_NVM_RPM_TIMEOUT 500
+
 struct intel_dg_nvm {
 	struct kref refcnt;
 	struct mtd_info mtd;
@@ -460,6 +463,7 @@ static int intel_dg_mtd_erase(struct mtd_info *mtd, struct erase_info *info)
 	loff_t from;
 	size_t len;
 	size_t total_len;
+	int ret = 0;
 
 	if (WARN_ON(!nvm))
 		return -EINVAL;
@@ -474,20 +478,28 @@ static int intel_dg_mtd_erase(struct mtd_info *mtd, struct erase_info *info)
 	total_len = info->len;
 	addr = info->addr;
 
+	ret = pm_runtime_resume_and_get(mtd->dev.parent);
+	if (ret < 0) {
+		dev_err(&mtd->dev, "rpm: get failed %d\n", ret);
+		return ret;
+	}
+
 	guard(mutex)(&nvm->lock);
 
 	while (total_len > 0) {
 		if (!IS_ALIGNED(addr, SZ_4K) || !IS_ALIGNED(total_len, SZ_4K)) {
 			dev_err(&mtd->dev, "unaligned erase %llx %zx\n", addr, total_len);
 			info->fail_addr = addr;
-			return -ERANGE;
+			ret = -ERANGE;
+			goto out;
 		}
 
 		idx = idg_nvm_get_region(nvm, addr);
 		if (idx >= nvm->nregions) {
 			dev_err(&mtd->dev, "out of range");
 			info->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
-			return -ERANGE;
+			ret = -ERANGE;
+			goto out;
 		}
 
 		from = addr - nvm->regions[idx].offset;
@@ -503,14 +515,18 @@ static int intel_dg_mtd_erase(struct mtd_info *mtd, struct erase_info *info)
 		if (bytes < 0) {
 			dev_dbg(&mtd->dev, "erase failed with %zd\n", bytes);
 			info->fail_addr += nvm->regions[idx].offset;
-			return bytes;
+			ret = bytes;
+			goto out;
 		}
 
 		addr += len;
 		total_len -= len;
 	}
 
-	return 0;
+out:
+	pm_runtime_mark_last_busy(mtd->dev.parent);
+	pm_runtime_put_autosuspend(mtd->dev.parent);
+	return ret;
 }
 
 static int intel_dg_mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
@@ -539,17 +555,25 @@ static int intel_dg_mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
 	if (len > nvm->regions[idx].size - from)
 		len = nvm->regions[idx].size - from;
 
+	ret = pm_runtime_resume_and_get(mtd->dev.parent);
+	if (ret < 0) {
+		dev_err(&mtd->dev, "rpm: get failed %zd\n", ret);
+		return ret;
+	}
+
 	guard(mutex)(&nvm->lock);
 
 	ret = idg_read(nvm, region, from, len, buf);
 	if (ret < 0) {
 		dev_dbg(&mtd->dev, "read failed with %zd\n", ret);
-		return ret;
+	} else {
+		*retlen = ret;
+		ret = 0;
 	}
 
-	*retlen = ret;
-
-	return 0;
+	pm_runtime_mark_last_busy(mtd->dev.parent);
+	pm_runtime_put_autosuspend(mtd->dev.parent);
+	return ret;
 }
 
 static int intel_dg_mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
@@ -578,17 +602,25 @@ static int intel_dg_mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
 	if (len > nvm->regions[idx].size - to)
 		len = nvm->regions[idx].size - to;
 
+	ret = pm_runtime_resume_and_get(mtd->dev.parent);
+	if (ret < 0) {
+		dev_err(&mtd->dev, "rpm: get failed %zd\n", ret);
+		return ret;
+	}
+
 	guard(mutex)(&nvm->lock);
 
 	ret = idg_write(nvm, region, to, len, buf);
 	if (ret < 0) {
 		dev_dbg(&mtd->dev, "write failed with %zd\n", ret);
-		return ret;
+	} else {
+		*retlen = ret;
+		ret = 0;
 	}
 
-	*retlen = ret;
-
-	return 0;
+	pm_runtime_mark_last_busy(mtd->dev.parent);
+	pm_runtime_put_autosuspend(mtd->dev.parent);
+	return ret;
 }
 
 static void intel_dg_nvm_release(struct kref *kref)
@@ -720,6 +752,17 @@ static int intel_dg_mtd_probe(struct auxiliary_device *aux_dev,
 		n++;
 	}
 
+	pm_runtime_enable(device);
+
+	pm_runtime_set_autosuspend_delay(device, INTEL_DG_NVM_RPM_TIMEOUT);
+	pm_runtime_use_autosuspend(device);
+
+	ret = pm_runtime_resume_and_get(device);
+	if (ret < 0) {
+		dev_err(device, "rpm: get failed %d\n", ret);
+		goto err_norpm;
+	}
+
 	nvm->base = devm_ioremap_resource(device, &invm->bar);
 	if (IS_ERR(nvm->base)) {
 		dev_err(device, "mmio not mapped\n");
@@ -742,9 +785,13 @@ static int intel_dg_mtd_probe(struct auxiliary_device *aux_dev,
 
 	dev_set_drvdata(&aux_dev->dev, nvm);
 
+	pm_runtime_put(device);
 	return 0;
 
 err:
+	pm_runtime_put(device);
+err_norpm:
+	pm_runtime_disable(device);
 	kref_put(&nvm->refcnt, intel_dg_nvm_release);
 	return ret;
 }
@@ -756,6 +803,8 @@ static void intel_dg_mtd_remove(struct auxiliary_device *aux_dev)
 	if (!nvm)
 		return;
 
+	pm_runtime_disable(&aux_dev->dev);
+
 	mtd_device_unregister(&nvm->mtd);
 
 	dev_set_drvdata(&aux_dev->dev, NULL);
-- 
2.43.0
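
Note (illustration only, not part of the patch): the erase/read/write paths above
all apply the same runtime PM pattern - take a reference on the parent device (the
graphics card) before touching the nvm, then drop it with autosuspend so the card
may go back to sleep INTEL_DG_NVM_RPM_TIMEOUT ms after the last access. A minimal
self-contained sketch of that pattern follows; the helper name and the op callback
are hypothetical and exist only for illustration.

/*
 * Sketch of the runtime PM get/put-autosuspend pattern used in this patch.
 * intel_dg_nvm_do_op() and the op callback are made-up names, not driver API.
 */
#include <linux/device.h>
#include <linux/pm_runtime.h>

static int intel_dg_nvm_do_op(struct device *parent,
			      int (*op)(void *ctx), void *ctx)
{
	int ret;

	/* Wake the parent device and bump its usage count. */
	ret = pm_runtime_resume_and_get(parent);
	if (ret < 0)
		return ret;

	ret = op(ctx);

	/*
	 * Drop the reference, but let autosuspend keep the device awake for
	 * the configured delay in case another nvm access follows shortly.
	 */
	pm_runtime_mark_last_busy(parent);
	pm_runtime_put_autosuspend(parent);

	return ret;
}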