From: Daniel Mack <zonque@xxxxxxxxx>

This patch makes the edma driver resume correctly after suspend. Tested
on an AM33xx platform with cyclic audio streams and omap_hsmmc.

All information can be reconstructed from already-known runtime
information.

As we now use some functions that were previously only used from __init
context, the annotations had to be dropped.

[nm@xxxxxx: added error handling for runtime + suspend_late/early_resume]
Signed-off-by: Nishanth Menon <nm@xxxxxx>
Signed-off-by: Daniel Mack <zonque@xxxxxxxxx>
Tested-by: Joel Fernandes <joelf@xxxxxx>
Acked-by: Joel Fernandes <joelf@xxxxxx>
[d-gerlach@xxxxxx: updated to remove queue_tc_mapping use]
Signed-off-by: Dave Gerlach <d-gerlach@xxxxxx>
---
Needed for am335x suspend, but never got picked up. Previously sent here:
http://marc.info/?l=linux-arm-kernel&m=138556067416051&w=2

 arch/arm/common/edma.c | 86 ++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 84 insertions(+), 2 deletions(-)

diff --git a/arch/arm/common/edma.c b/arch/arm/common/edma.c
index 8809917..ece1e3d 100644
--- a/arch/arm/common/edma.c
+++ b/arch/arm/common/edma.c
@@ -244,6 +244,8 @@ struct edma {
 	/* list of channels with no even trigger; terminated by "-1" */
 	const s8	*noevent;
 
+	struct edma_soc_info *info;
+
 	/* The edma_inuse bit for each PaRAM slot is clear unless the
 	 * channel is in use ... by ARM or DSP, for QDMA, or whatever.
 	 */
@@ -295,7 +297,7 @@ static void map_dmach_queue(unsigned ctlr, unsigned ch_no,
 			~(0x7 << bit), queue_no << bit);
 }
 
-static void __init assign_priority_to_queue(unsigned ctlr, int queue_no,
+static void assign_priority_to_queue(unsigned ctlr, int queue_no,
 		int priority)
 {
 	int bit = queue_no * 4;
@@ -314,7 +316,7 @@ static void __init assign_priority_to_queue(unsigned ctlr, int queue_no,
  * included in that particular EDMA variant (Eg : dm646x)
  *
  */
-static void __init map_dmach_param(unsigned ctlr)
+static void map_dmach_param(unsigned ctlr)
 {
 	int i;
 	for (i = 0; i < EDMA_MAX_DMACH; i++)
@@ -1791,15 +1793,95 @@ static int edma_probe(struct platform_device *pdev)
 			edma_write_array2(j, EDMA_DRAE, i, 1, 0x0);
 			edma_write_array(j, EDMA_QRAE, i, 0x0);
 		}
+		edma_cc[j]->info = info[j];
 		arch_num_cc++;
 	}
 
 	return 0;
 }
 
+static int edma_pm_suspend(struct device *dev)
+{
+	int j, r;
+
+	r = pm_runtime_get_sync(dev);
+	if (r < 0) {
+		dev_err(dev, "%s: get_sync returned %d\n", __func__, r);
+		return r;
+	}
+
+	for (j = 0; j < arch_num_cc; j++) {
+		struct edma *ecc = edma_cc[j];
+
+		disable_irq(ecc->irq_res_start);
+		disable_irq(ecc->irq_res_end);
+	}
+
+	pm_runtime_put_sync(dev);
+
+	return 0;
+}
+
+static int edma_pm_resume(struct device *dev)
+{
+	int i, j, r;
+
+	r = pm_runtime_get_sync(dev);
+	if (r < 0) {
+		dev_err(dev, "%s: get_sync returned %d\n", __func__, r);
+		return r;
+	}
+
+	for (j = 0; j < arch_num_cc; j++) {
+		struct edma *cc = edma_cc[j];
+
+		s8 (*queue_priority_mapping)[2];
+
+		queue_priority_mapping = cc->info->queue_priority_mapping;
+
+		/* Event queue priority mapping */
+		for (i = 0; queue_priority_mapping[i][0] != -1; i++)
+			assign_priority_to_queue(j,
+						 queue_priority_mapping[i][0],
+						 queue_priority_mapping[i][1]);
+
+		/*
+		 * Map the channel to param entry if channel mapping logic
+		 * exist
+		 */
+		if (edma_read(j, EDMA_CCCFG) & CHMAP_EXIST)
+			map_dmach_param(j);
+
+		for (i = 0; i < cc->num_channels; i++) {
+			if (test_bit(i, cc->edma_inuse)) {
+				/* ensure access through shadow region 0 */
+				edma_or_array2(j, EDMA_DRAE, 0, i >> 5,
+					       BIT(i & 0x1f));
+
+				setup_dma_interrupt(i,
+						    cc->intr_data[i].callback,
+						    cc->intr_data[i].data);
+			}
+		}
+
+		enable_irq(cc->irq_res_start);
+		enable_irq(cc->irq_res_end);
+	}
+
+	pm_runtime_put_sync(dev);
+
+	return 0;
+}
+
+static const struct dev_pm_ops edma_pm_ops = {
+	.suspend_late	= edma_pm_suspend,
+	.resume_early	= edma_pm_resume,
+};
+
 static struct platform_driver edma_driver = {
 	.driver = {
 		.name	= "edma",
+		.pm	= &edma_pm_ops,
 		.of_match_table = edma_of_ids,
 	},
 	.probe = edma_probe,
-- 
1.9.0
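
For readers following the resume path above, the snippet below is a minimal,
user-space-only sketch (plain C, no kernel headers; the table values and
names are made up for illustration) of the two pieces of bookkeeping that
edma_pm_resume() repeats on wakeup: walking the -1-terminated
queue_priority_mapping table, and turning a channel number into a DRAE
register word and bit mask via i >> 5 and BIT(i & 0x1f).

#include <stdio.h>
#include <stdint.h>

/* hypothetical { event queue, priority } table, -1 terminated like
 * queue_priority_mapping in the patch */
static const int8_t queue_priority[][2] = {
	{  0,  3 },
	{  1,  7 },
	{ -1, -1 },
};

int main(void)
{
	int i;
	unsigned int ch;

	/* Walk the table the same way the resume loop does: stop at -1. */
	for (i = 0; queue_priority[i][0] != -1; i++)
		printf("event queue %d -> priority %d\n",
		       queue_priority[i][0], queue_priority[i][1]);

	/* Channel -> DRAE word index and bit, as in i >> 5 / BIT(i & 0x1f):
	 * each 32-bit DRAE word covers 32 channels. */
	for (ch = 0; ch < 64; ch += 31)
		printf("channel %2u -> DRAE word %u, mask 0x%08x\n",
		       ch, ch >> 5, 1u << (ch & 0x1f));

	return 0;
}

Building and running it only prints the computed indices; it is meant to
show the arithmetic the resume hook relies on, not to drive hardware.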