Hi Boris,

On Fri, 1 Jun 2018 16:36:02 +0200, Boris Brezillon
<boris.brezillon@xxxxxxxxxxx> wrote:

> Most modern QSPI controllers support can directly map a SPI memory (or

s/support// ?

> a portion of the SPI memory) in the CPU address space. Most of the time
> this brings significant performance improvements as it automates the
> whole process of sending SPI memory operations every time a new region
> is accessed.
>
> This new API allow SPI memory driver to create direct mappings and then

s/allow/allows/
s/driver/drivers/ ?

> use them to access the memory instead of using spi_mem_exec_op().
>
> Signed-off-by: Boris Brezillon <boris.brezillon@xxxxxxxxxxx>
> ---
>  drivers/spi/spi-mem.c       | 267 ++++++++++++++++++++++++++++++++++++++++----
>  include/linux/spi/spi-mem.h |  72 ++++++++++++
>  2 files changed, 318 insertions(+), 21 deletions(-)
>
> diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c
> index 990770dfa5cf..90ea0c5263a7 100644
> --- a/drivers/spi/spi-mem.c
> +++ b/drivers/spi/spi-mem.c
> @@ -175,6 +175,44 @@ bool spi_mem_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
>  }
>  EXPORT_SYMBOL_GPL(spi_mem_supports_op);
>
> +static int spi_mem_access_start(struct spi_mem *mem)
> +{
> +        struct spi_controller *ctlr = mem->spi->controller;
> +
> +        /*
> +         * Flush the message queue before executing our SPI memory
> +         * operation to prevent preemption of regular SPI transfers.
> +         */
> +        spi_flush_queue(ctlr);
> +
> +        if (ctlr->auto_runtime_pm) {
> +                int ret;
> +
> +                ret = pm_runtime_get_sync(ctlr->dev.parent);
> +                if (ret < 0) {
> +                        dev_err(&ctlr->dev, "Failed to power device: %d\n",
> +                                ret);
> +                        return ret;
> +                }
> +        }
> +
> +        mutex_lock(&ctlr->bus_lock_mutex);
> +        mutex_lock(&ctlr->io_mutex);
> +
> +        return 0;
> +}
> +
> +static void spi_mem_access_end(struct spi_mem *mem)
> +{
> +        struct spi_controller *ctlr = mem->spi->controller;
> +
> +        mutex_unlock(&ctlr->io_mutex);
> +        mutex_unlock(&ctlr->bus_lock_mutex);
> +
> +        if (ctlr->auto_runtime_pm)
> +                pm_runtime_put(ctlr->dev.parent);
> +}
> +
>  /**
>   * spi_mem_exec_op() - Execute a memory operation
>   * @mem: the SPI memory
> @@ -200,30 +238,13 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
>                  return -ENOTSUPP;
>
>          if (ctlr->mem_ops) {
> -                /*
> -                 * Flush the message queue before executing our SPI memory
> -                 * operation to prevent preemption of regular SPI transfers.
> -                 */
> -                spi_flush_queue(ctlr);
> -
> -                if (ctlr->auto_runtime_pm) {
> -                        ret = pm_runtime_get_sync(ctlr->dev.parent);
> -                        if (ret < 0) {
> -                                dev_err(&ctlr->dev,
> -                                        "Failed to power device: %d\n",
> -                                        ret);
> -                                return ret;
> -                        }
> -                }
> +                ret = spi_mem_access_start(mem);
> +                if (ret)
> +                        return ret;
>
> -                mutex_lock(&ctlr->bus_lock_mutex);
> -                mutex_lock(&ctlr->io_mutex);
>                  ret = ctlr->mem_ops->exec_op(mem, op);
> -                mutex_unlock(&ctlr->io_mutex);
> -                mutex_unlock(&ctlr->bus_lock_mutex);
>
> -                if (ctlr->auto_runtime_pm)
> -                        pm_runtime_put(ctlr->dev.parent);
> +                spi_mem_access_end(mem);

As this is something you tell me on a weekly basis: would you mind
separating the direct mapping support and the spi_mem_access_start/end()
helpers introduction into two different patches? :)

>
>          /*
>           * Some controllers only optimize specific paths (typically the
> @@ -336,6 +357,210 @@ int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
>  }
>  EXPORT_SYMBOL_GPL(spi_mem_adjust_op_size);
>
[...]
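Side note, mostly to check my own understanding: I assume the accessors
added in the hunk snipped at "[...]" above reuse these helpers. A minimal
sketch of what I would expect them to look like (purely hypothetical,
since the actual implementation is elided; the nodirmap fallback to
spi_mem_exec_op() described in the kernel-doc below is omitted for
brevity):

/*
 * Hypothetical sketch only, not the real hunk: shows how a dirmap
 * accessor can share the queue-flush/runtime-PM/locking sequence
 * through the new helpers, exactly like the refactored
 * spi_mem_exec_op() does.
 */
ssize_t spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc,
                            u64 offs, size_t len, void *buf)
{
        struct spi_controller *ctlr = desc->mem->spi->controller;
        ssize_t ret;

        ret = spi_mem_access_start(desc->mem);
        if (ret)
                return ret;

        /* Delegate to the controller's direct mapping implementation. */
        ret = ctlr->mem_ops->dirmap_read(desc, offs, len, buf);

        spi_mem_access_end(desc->mem);

        return ret;
}

If that matches the real code, it is one more argument for introducing
the helpers in their own preparatory patch.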
> +
> +/**
> + * struct spi_mem_dirmap_desc - Direct mapping descriptor
> + * @mem: the SPI memory device this direct mapping is attached to
> + * @info: information passed at direct mapping creation time
> + * @nodirmap: set to true if the SPI controller does not implement
> + *            ->mem_ops->dirmap_create() or when this function returned an

s/returned/returns/

> + *            error. If dirmap is true, all spi_mem_dirmap_{read,write}()
> + *            calls will use spi_mem_exec_op() to access the memory. This is a
> + *            degraded mode that allows higher spi_mem drivers to use the same
> + *            code no matter if the controller supports direct mapping or not
> + * @priv: field pointing to controller specific data
> + *
> + * Common part of a direct mapping descriptor. This object is created by
> + * spi_mem_dirmap_create() and controller implementation of ->create_dirmap()
> + * can create/attach direct mapping resources to the descriptor in the ->priv
> + * field.
> + */
> +struct spi_mem_dirmap_desc {
> +        struct spi_mem *mem;
> +        struct spi_mem_dirmap_info info;
> +        bool nodirmap;
> +        void *priv;
> +};
> +
>  /**
>   * struct spi_mem - describes a SPI memory device
>   * @spi: the underlying SPI device
> @@ -167,10 +210,24 @@ static inline void *spi_mem_get_drvdata(struct spi_mem *mem)
>   *                  limitations)
>   * @supports_op: check if an operation is supported by the controller
>   * @exec_op: execute a SPI memory operation
> + * @dirmap_create: create a direct mapping descriptor that can later be used to
> + *                 access the memory device. This method is optional

Only ->dirmap_create() is marked as optional, while all of these hooks
are.

> + * @dirmap_destroy: destroy a memory descriptor previous created by
> + *                  ->dirmap_create()

s/previous/previously/

> + * @dirmap_read: read data from the memory device using the direct mapping
> + *               created by ->dirmap_create().
> + * @dirmap_write: write data to the memory device using the direct mapping
> + *                created by ->dirmap_create().

I think there is a better kernel-doc way to reference dirmap_create(),
maybe with '@' (I don't remember exactly).

>   *
>   * This interface should be implemented by SPI controllers providing an
>   * high-level interface to execute SPI memory operation, which is usually the
>   * case for QSPI controllers.
>   *
> + * Note on ->dirmap_{read,write}(): drivers should avoid accessing the direct
> + * mapping from the CPU because doing that can stall the CPU waiting for the
> + * SPI mem transaction to finish, and this will make real-time maintainers
> + * unhappy and might make your system less reactive. Instead, drivers should
> + * use DMA to access this direct mapping.
>   */
>  struct spi_controller_mem_ops {
>          int (*adjust_op_size)(struct spi_mem *mem, struct spi_mem_op *op);
> @@ -178,6 +235,12 @@ struct spi_controller_mem_ops {
>                               const struct spi_mem_op *op);
>          int (*exec_op)(struct spi_mem *mem,
>                         const struct spi_mem_op *op);
> +        int (*dirmap_create)(struct spi_mem_dirmap_desc *desc);
> +        void (*dirmap_destroy)(struct spi_mem_dirmap_desc *desc);
> +        ssize_t (*dirmap_read)(struct spi_mem_dirmap_desc *desc,
> +                               u64 offs, size_t len, void *buf);
> +        ssize_t (*dirmap_write)(struct spi_mem_dirmap_desc *desc,
> +                                u64 offs, size_t len, const void *buf);
>  };
>
>  /**
> @@ -236,6 +299,15 @@ bool spi_mem_supports_op(struct spi_mem *mem,
>  int spi_mem_exec_op(struct spi_mem *mem,
>                      const struct spi_mem_op *op);
>
> +struct spi_mem_dirmap_desc *
> +spi_mem_dirmap_create(struct spi_mem *mem,
> +                      const struct spi_mem_dirmap_info *info);
> +void spi_mem_dirmap_destroy(struct spi_mem_dirmap_desc *desc);
> +ssize_t spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc,
> +                            u64 offs, size_t len, void *buf);
> +ssize_t spi_mem_dirmap_write(struct spi_mem_dirmap_desc *desc,
> +                             u64 offs, size_t len, const void *buf);
> +
>  int spi_mem_driver_register_with_owner(struct spi_mem_driver *drv,
>                                         struct module *owner);
>

The rest is clear to me.

Thanks,
Miquèl
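P.S.: to double-check my understanding of the consumer side, here is a
minimal, purely hypothetical usage sketch from a flash driver's point of
view. The spi_mem_dirmap_info field names (op_tmpl/offset/length) and the
ERR_PTR-style error reporting of spi_mem_dirmap_create() are my
assumptions, as neither appears in the quoted hunks:

#include <linux/err.h>
#include <linux/sizes.h>
#include <linux/spi/spi-mem.h>

/* Hypothetical consumer, e.g. called from a SPI-NOR driver's probe path. */
static ssize_t my_flash_dirmap_read_test(struct spi_mem *mem, void *buf)
{
        /*
         * Assumed layout: an op template describing the read cycle
         * (fast read 0x0b here) plus the window to map.
         */
        struct spi_mem_dirmap_info info = {
                .op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(0x0b, 1),
                                      SPI_MEM_OP_ADDR(3, 0, 1),
                                      SPI_MEM_OP_DUMMY(1, 1),
                                      SPI_MEM_OP_DATA_IN(0, NULL, 1)),
                .offset = 0,
                .length = SZ_16M,
        };
        struct spi_mem_dirmap_desc *desc;
        ssize_t ret;

        /* Assuming ERR_PTR-style error reporting on creation. */
        desc = spi_mem_dirmap_create(mem, &info);
        if (IS_ERR(desc))
                return PTR_ERR(desc);

        /* Read 256 bytes from the start of the mapped window. */
        ret = spi_mem_dirmap_read(desc, 0, 256, buf);

        spi_mem_dirmap_destroy(desc);

        return ret;
}

Nice property of the API: thanks to the nodirmap fallback, this exact
code would also work on controllers without ->dirmap_create().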