Hi Dmitry,

On 11/11/20 10:14 AM, Dmitry Osipenko wrote:
> Add devfreq support to the Tegra20 EMC driver. Memory utilization
> statistics will be periodically polled from the memory controller and
> appropriate minimum clock rate will be selected by the devfreq governor.
> 
> Signed-off-by: Dmitry Osipenko <digetx@xxxxxxxxx>
> ---
>  drivers/memory/tegra/Kconfig       |  3 +-
>  drivers/memory/tegra/tegra20-emc.c | 90 ++++++++++++++++++++++++++++++
>  2 files changed, 92 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/memory/tegra/Kconfig b/drivers/memory/tegra/Kconfig
> index ac3dfe155505..8cc1ec5be443 100644
> --- a/drivers/memory/tegra/Kconfig
> +++ b/drivers/memory/tegra/Kconfig
> @@ -12,7 +12,8 @@ config TEGRA20_EMC
>  	tristate "NVIDIA Tegra20 External Memory Controller driver"
>  	default y
>  	depends on TEGRA_MC && ARCH_TEGRA_2x_SOC
> -	select PM_OPP
> +	select DEVFREQ_GOV_SIMPLE_ONDEMAND
> +	select PM_DEVFREQ
>  	help
>  	  This driver is for the External Memory Controller (EMC) found on
>  	  Tegra20 chips. The EMC controls the external DRAM on the board.
> diff --git a/drivers/memory/tegra/tegra20-emc.c b/drivers/memory/tegra/tegra20-emc.c
> index d01b556a6d06..b9cd965980e2 100644
> --- a/drivers/memory/tegra/tegra20-emc.c
> +++ b/drivers/memory/tegra/tegra20-emc.c
> @@ -8,6 +8,7 @@
>  #include <linux/clk.h>
>  #include <linux/clk/tegra.h>
>  #include <linux/debugfs.h>
> +#include <linux/devfreq.h>
>  #include <linux/err.h>
>  #include <linux/interconnect-provider.h>
>  #include <linux/interrupt.h>
> @@ -102,6 +103,10 @@
>  
>  #define EMC_FBIO_CFG5_DRAM_WIDTH_X16		BIT(4)
>  
> +#define EMC_PWR_GATHER_CLEAR			(1 << 8)
> +#define EMC_PWR_GATHER_DISABLE			(2 << 8)
> +#define EMC_PWR_GATHER_ENABLE			(3 << 8)
> +
>  static const u16 emc_timing_registers[] = {
>  	EMC_RC,
>  	EMC_RFC,
> @@ -157,6 +162,7 @@ struct emc_timing {
>  };
>  
>  enum emc_rate_request_type {
> +	EMC_RATE_DEVFREQ,
>  	EMC_RATE_DEBUG,
>  	EMC_RATE_ICC,
>  	EMC_RATE_TYPE_MAX,
> @@ -193,6 +199,8 @@ struct tegra_emc {
>  
>  	/* protect shared rate-change code path */
>  	struct mutex rate_lock;
> +
> +	struct devfreq_simple_ondemand_data ondemand_data;
>  };
>  
>  static irqreturn_t tegra_emc_isr(int irq, void *data)
> @@ -1003,6 +1011,87 @@ static int tegra_emc_init_clk(struct tegra_emc *emc)
>  	return 0;
>  }
>  
> +static int tegra_emc_devfreq_target(struct device *dev, unsigned long *freq,
> +				    u32 flags)
> +{
> +	struct tegra_emc *emc = dev_get_drvdata(dev);
> +	struct dev_pm_opp *opp;
> +	unsigned long rate;
> +
> +	opp = devfreq_recommended_opp(dev, freq, flags);
> +	if (IS_ERR(opp)) {
> +		dev_err(dev, "failed to find opp for %lu Hz\n", *freq);
> +		return PTR_ERR(opp);
> +	}
> +
> +	rate = dev_pm_opp_get_freq(opp);
> +	dev_pm_opp_put(opp);
> +
> +	return emc_set_min_rate(emc, rate, EMC_RATE_DEVFREQ);
> +}
> +
> +static int tegra_emc_devfreq_get_dev_status(struct device *dev,
> +					    struct devfreq_dev_status *stat)
> +{
> +	struct tegra_emc *emc = dev_get_drvdata(dev);
> +
> +	/* freeze counters */
> +	writel_relaxed(EMC_PWR_GATHER_DISABLE, emc->regs + EMC_STAT_CONTROL);
> +
> +	/*
> +	 * busy_time: number of clocks EMC request was accepted
> +	 * total_time: number of clocks PWR_GATHER control was set to ENABLE
> +	 */
> +	stat->busy_time = readl_relaxed(emc->regs + EMC_STAT_PWR_COUNT);
> +	stat->total_time = readl_relaxed(emc->regs + EMC_STAT_PWR_CLOCKS);
> +	stat->current_frequency = clk_get_rate(emc->clk);
> +
> +	/* clear counters and restart */
> +	writel_relaxed(EMC_PWR_GATHER_CLEAR, emc->regs + EMC_STAT_CONTROL);
> +	writel_relaxed(EMC_PWR_GATHER_ENABLE, emc->regs + EMC_STAT_CONTROL);
> +
> +	return 0;
> +}
> +
> +static struct devfreq_dev_profile tegra_emc_devfreq_profile = {
> +	.polling_ms = 30,
> +	.target = tegra_emc_devfreq_target,
> +	.get_dev_status = tegra_emc_devfreq_get_dev_status,
> +};
> +
> +static int tegra_emc_devfreq_init(struct tegra_emc *emc)
> +{
> +	struct devfreq *devfreq;
> +
> +	/*
> +	 * PWR_COUNT is at most 1/2 of PWR_CLOCKS, and thus the up-threshold
> +	 * should be less than 50. Secondly, multiple active memory clients
> +	 * may cause over 20% of lost clock cycles due to stalls caused by
> +	 * competing memory accesses. This means that the threshold should be
> +	 * set to less than 30 in order to have a properly working governor.
> +	 */
> +	emc->ondemand_data.upthreshold = 20;
> +
> +	/*
> +	 * Reset the statistics-gathering state, select the global bandwidth
> +	 * statistics collection mode and set the clocks counter saturation
> +	 * limit to maximum.
> +	 */
> +	writel_relaxed(0x00000000, emc->regs + EMC_STAT_CONTROL);
> +	writel_relaxed(0x00000000, emc->regs + EMC_STAT_LLMC_CONTROL);
> +	writel_relaxed(0xffffffff, emc->regs + EMC_STAT_PWR_CLOCK_LIMIT);
> +
> +	devfreq = devm_devfreq_add_device(emc->dev, &tegra_emc_devfreq_profile,
> +					  DEVFREQ_GOV_SIMPLE_ONDEMAND,
> +					  &emc->ondemand_data);
> +	if (IS_ERR(devfreq)) {
> +		dev_err(emc->dev, "failed to initialize devfreq: %pe\n", devfreq);
> +		return PTR_ERR(devfreq);
> +	}
> +
> +	return 0;
> +}
> +
>  static int tegra_emc_probe(struct platform_device *pdev)
>  {
>  	struct device_node *np;
> @@ -1058,6 +1147,7 @@ static int tegra_emc_probe(struct platform_device *pdev)
>  	tegra_emc_rate_requests_init(emc);
>  	tegra_emc_debugfs_init(emc);
>  	tegra_emc_interconnect_init(emc);
> +	tegra_emc_devfreq_init(emc);
>  
>  	/*
>  	 * Don't allow the kernel module to be unloaded. Unloading adds some
> 

Reviewed-by: Chanwoo Choi <cw00.choi@xxxxxxxxxxx>

-- 
Best Regards,
Chanwoo Choi
Samsung Electronics
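
P.S. A quick note on how upthreshold = 20 interacts with the counters above,
for anyone reading along: as far as I understand it, the simple_ondemand
governor essentially compares busy_time * 100 against total_time *
upthreshold, requests the maximum rate when the load is above the threshold,
and otherwise scales the request down roughly in proportion to the load.
Since PWR_COUNT can reach at most half of PWR_CLOCKS, the reported load tops
out around 50%, which is why the threshold has to sit well below 50. The
standalone sketch below is only an approximation of that decision logic, not
the governor's actual code; the frequency values, sample counters and the
pick_freq() helper are made up for illustration, and a down-differential of 5
is assumed as the governor's default since the patch leaves it unset.

/*
 * Rough userspace approximation of the simple_ondemand decision applied to
 * the counters used by the patch: busy = EMC_STAT_PWR_COUNT, total =
 * EMC_STAT_PWR_CLOCKS.  All constants and sample values are hypothetical.
 */
#include <stdio.h>

#define UPTHRESHOLD		20		/* emc->ondemand_data.upthreshold */
#define DOWNDIFFERENTIAL	5		/* assumed governor default */
#define MAX_FREQ		400000000ULL	/* made-up top EMC rate, Hz */

static unsigned long long pick_freq(unsigned long long busy,
				    unsigned long long total,
				    unsigned long long cur_freq)
{
	/* No sampling window yet: assume the worst and go to maximum. */
	if (!total)
		return MAX_FREQ;

	/* Busy enough: request the maximum rate. */
	if (busy * 100 > total * UPTHRESHOLD)
		return MAX_FREQ;

	/* Inside the hysteresis window: keep the current rate. */
	if (busy * 100 > total * (UPTHRESHOLD - DOWNDIFFERENTIAL))
		return cur_freq;

	/* Otherwise scale the request down in proportion to the load. */
	return busy * cur_freq / total * 100 / (UPTHRESHOLD - DOWNDIFFERENTIAL / 2);
}

int main(void)
{
	/* PWR_COUNT saturates at half of PWR_CLOCKS, so load never exceeds ~50%. */
	printf("idle: %llu Hz\n", pick_freq(1000, 100000, 300000000ULL));
	printf("busy: %llu Hz\n", pick_freq(45000, 100000, 300000000ULL));
	return 0;
}

With those sample numbers the idle case scales the requested rate down (the
patch's tegra_emc_devfreq_target() then snaps it to the nearest OPP via
devfreq_recommended_opp()), while the busy case asks for the maximum.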