Add support for dynamic reconfiguration of the clock output rate. Output
clocks are registered as dividers and the set_rate callback is used for
dynamic reconfiguration.

Based on the initial work from Chirag.

Signed-off-by: Chirag Parekh <chirag.parekh@xxxxxxxxxx>
Signed-off-by: Shubhrajyoti Datta <shubhrajyoti.datta@xxxxxxxxxx>
---
 drivers/clk/clk-xlnx-clock-wizard.c | 212 +++++++++++++++++++++++++++++++++++-
 1 file changed, 206 insertions(+), 6 deletions(-)

diff --git a/drivers/clk/clk-xlnx-clock-wizard.c b/drivers/clk/clk-xlnx-clock-wizard.c
index d6577c8..4b563f3 100644
--- a/drivers/clk/clk-xlnx-clock-wizard.c
+++ b/drivers/clk/clk-xlnx-clock-wizard.c
@@ -31,8 +31,23 @@
 #define WZRD_DIVCLK_DIVIDE_SHIFT	0
 #define WZRD_DIVCLK_DIVIDE_MASK		(0xff << WZRD_DIVCLK_DIVIDE_SHIFT)
 #define WZRD_CLKOUT_DIVIDE_SHIFT	0
+#define WZRD_CLKOUT_DIVIDE_WIDTH	8
 #define WZRD_CLKOUT_DIVIDE_MASK		(0xff << WZRD_DIVCLK_DIVIDE_SHIFT)
 
+#define WZRD_DR_MAX_INT_DIV_VALUE	255
+#define WZRD_DR_NUM_RETRIES		10000
+#define WZRD_DR_STATUS_REG_OFFSET	0x04
+#define WZRD_DR_LOCK_BIT_MASK		0x00000001
+#define WZRD_DR_INIT_REG_OFFSET		0x25C
+#define WZRD_DR_DIV_TO_PHASE_OFFSET	4
+#define WZRD_DR_BEGIN_DYNA_RECONF	0x03
+
+/* Get the mask from width */
+#define div_mask(width)	((1 << (width)) - 1)
+
+/* Extract divider instance from clock hardware instance */
+#define to_clk_wzrd_divider(_hw)	container_of(_hw, struct clk_wzrd_divider, hw)
+
 enum clk_wzrd_int_clks {
 	wzrd_clk_mul,
 	wzrd_clk_mul_div,
@@ -64,6 +79,29 @@ struct clk_wzrd {
 	bool suspended;
 };
 
+/**
+ * struct clk_wzrd_divider - clock divider specific to clk_wzrd
+ *
+ * @hw:		handle between common and hardware-specific interfaces
+ * @base:	base address of register containing the divider
+ * @offset:	offset address of register containing the divider
+ * @shift:	shift to the divider bit field
+ * @width:	width of the divider bit field
+ * @flags:	clk_wzrd divider flags
+ * @table:	array of value/divider pairs, last entry should have div = 0
+ * @lock:	register lock
+ */
+struct clk_wzrd_divider {
+	struct clk_hw hw;
+	void __iomem *base;
+	u16 offset;
+	u8 shift;
+	u8 width;
+	u8 flags;
+	const struct clk_div_table *table;
+	spinlock_t *lock;  /* divider lock */
+};
+
 #define to_clk_wzrd(_nb) container_of(_nb, struct clk_wzrd, nb)
 
 /* maximum frequencies for input/output clocks per speed grade */
@@ -73,6 +111,164 @@ static const unsigned long clk_wzrd_max_freq[] = {
 	1066000000UL
 };
 
+/* spin lock variable for clk_wzrd */
+static DEFINE_SPINLOCK(clkwzrd_lock);
+
+static unsigned long clk_wzrd_recalc_rate(struct clk_hw *hw,
+					  unsigned long parent_rate)
+{
+	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
+	void __iomem *div_addr =
+		(void __iomem *)((u64)divider->base + divider->offset);
+	unsigned int val;
+
+	val = readl(div_addr) >> divider->shift;
+	val &= div_mask(divider->width);
+
+	return divider_recalc_rate(hw, parent_rate, val, divider->table,
+				   divider->flags);
+}
+
+static int clk_wzrd_dynamic_reconfig(struct clk_hw *hw, unsigned long rate,
+				     unsigned long parent_rate)
+{
+	int err = 0;
+	u16 retries;
+	u32 value;
+	unsigned long flags = 0;
+	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
+	void __iomem *div_addr =
+		(void __iomem *)((u64)divider->base + divider->offset);
+
+	if (divider->lock)
+		spin_lock_irqsave(divider->lock, flags);
+	else
+		__acquire(divider->lock);
+
+	value = DIV_ROUND_CLOSEST(parent_rate, rate);
+
+	/* Cap the value to max */
+	if (value > WZRD_DR_MAX_INT_DIV_VALUE)
+		value = WZRD_DR_MAX_INT_DIV_VALUE;
+
+	/* Set divisor and clear phase offset */
+	writel(value, div_addr);
+	writel(0x00, div_addr + WZRD_DR_DIV_TO_PHASE_OFFSET);
+
+	/* Check status register */
+	retries = WZRD_DR_NUM_RETRIES;
+	while (--retries) {
+		if (readl(divider->base + WZRD_DR_STATUS_REG_OFFSET) &
+		    WZRD_DR_LOCK_BIT_MASK)
+			break;
+	}
+
+	if (retries == 0) {
+		err = -ETIMEDOUT;
+		goto err_reconfig;
+	}
+
+	/* Initiate reconfiguration */
+	writel(WZRD_DR_BEGIN_DYNA_RECONF,
+	       divider->base + WZRD_DR_INIT_REG_OFFSET);
+
+	/* Check status register */
+	retries = WZRD_DR_NUM_RETRIES;
+	while (--retries) {
+		if (readl(divider->base + WZRD_DR_STATUS_REG_OFFSET) &
+		    WZRD_DR_LOCK_BIT_MASK)
+			break;
+	}
+
+	if (retries == 0)
+		err = -ETIMEDOUT;
+
+err_reconfig:
+	if (divider->lock)
+		spin_unlock_irqrestore(divider->lock, flags);
+	else
+		__release(divider->lock);
+
+	return err;
+}
+
+static long clk_wzrd_round_rate(struct clk_hw *hw, unsigned long rate,
+				unsigned long *prate)
+{
+	u8 div;
+
+	/*
+	 * since we do not change parent rate we just round rate to closest
+	 * achievable
+	 */
+	div = DIV_ROUND_CLOSEST(*prate, rate);
+
+	return *prate / div;
+}
+
+static const struct clk_ops clk_wzrd_clk_divider_ops = {
+	.round_rate = clk_wzrd_round_rate,
+	.set_rate = clk_wzrd_dynamic_reconfig,
+	.recalc_rate = clk_wzrd_recalc_rate,
+};
+
+static struct clk *clk_wzrd_register_divider(struct device *dev,
+					     const char *name,
+					     const char *parent_name,
+					     unsigned long flags,
+					     void __iomem *base, u16 offset,
+					     u8 shift, u8 width,
+					     u8 clk_divider_flags,
+					     const struct clk_div_table *table,
+					     spinlock_t *lock)
+{
+	struct clk_wzrd_divider *div;
+	struct clk_hw *hw;
+	struct clk_init_data init;
+	int ret;
+
+	if (clk_divider_flags & CLK_DIVIDER_HIWORD_MASK) {
+		if (width + shift > 16) {
+			pr_warn("divider value exceeds LOWORD field\n");
+			return ERR_PTR(-EINVAL);
+		}
+	}
+
+	/* allocate the divider */
+	div = kzalloc(sizeof(*div), GFP_KERNEL);
+	if (!div)
+		return ERR_PTR(-ENOMEM);
+
+	init.name = name;
+	if (clk_divider_flags & CLK_DIVIDER_READ_ONLY)
+		init.ops = &clk_divider_ro_ops;
+	else
+		init.ops = &clk_wzrd_clk_divider_ops;
+	init.flags = flags | CLK_IS_BASIC;
+	init.parent_names = (parent_name ? &parent_name : NULL);
+	init.num_parents = (parent_name ? 1 : 0);
+	/* struct clk_divider assignments */
+	div->base = base;
+	div->offset = offset;
+	div->shift = shift;
+	div->width = width;
+	div->flags = clk_divider_flags;
+	div->lock = lock;
+	div->hw.init = &init;
+	div->table = table;
+
+	/* register the clock */
+	hw = &div->hw;
+	ret = clk_hw_register(dev, hw);
+	if (ret) {
+		kfree(div);
+		return ERR_PTR(ret);
+	}
+
+	return hw->clk;
+}
+
 static int clk_wzrd_clk_notifier(struct notifier_block *nb, unsigned long event,
 				 void *data)
 {
@@ -225,7 +421,8 @@ static int clk_wzrd_probe(struct platform_device *pdev)
-	clk_wzrd->clks_internal[wzrd_clk_mul_div] = clk_register_fixed_factor
+	clk_wzrd->clks_internal[wzrd_clk_mul_div] = clk_register_divider
 			(&pdev->dev, clk_name,
 			__clk_get_name(clk_wzrd->clks_internal[wzrd_clk_mul]),
-			0, 1, reg);
+			flags, ctrl_reg, 0, 8, CLK_DIVIDER_ONE_BASED |
+			CLK_DIVIDER_ALLOW_ZERO, &clkwzrd_lock);
 	if (IS_ERR(clk_wzrd->clks_internal[wzrd_clk_mul_div])) {
 		dev_err(&pdev->dev, "unable to register divider clock\n");
 		ret = PTR_ERR(clk_wzrd->clks_internal[wzrd_clk_mul_div]);
@@ -243,11 +440,14 @@ static int clk_wzrd_probe(struct platform_device *pdev)
 			ret = -EINVAL;
 			goto err_rm_int_clks;
 		}
-		reg = readl(clk_wzrd->base + WZRD_CLK_CFG_REG(2) + i * 12);
-		reg &= WZRD_CLKOUT_DIVIDE_MASK;
-		reg >>= WZRD_CLKOUT_DIVIDE_SHIFT;
-		clk_wzrd->clkout[i] = clk_register_fixed_factor
-			(&pdev->dev, clkout_name, clk_name, 0, 1, reg);
+		clk_wzrd->clkout[i] = clk_wzrd_register_divider(&pdev->dev,
+				clkout_name,
+				clk_name, 0,
+				clk_wzrd->base, (WZRD_CLK_CFG_REG(2) + i * 12),
+				WZRD_CLKOUT_DIVIDE_SHIFT,
+				WZRD_CLKOUT_DIVIDE_WIDTH,
+				CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+				NULL, &clkwzrd_lock);
 		if (IS_ERR(clk_wzrd->clkout[i])) {
 			int j;
 
-- 
2.1.1
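
For context, a minimal, hypothetical consumer sketch (not part of this patch; the
platform driver, clock lookup and the 100 MHz target are illustrative assumptions)
showing how the new set_rate path is exercised: clk_set_rate() on an output clock
goes through clk_wzrd_round_rate() and clk_wzrd_dynamic_reconfig(), which programs
the output divider and starts the reconfiguration sequence added above.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* Hypothetical consumer: reconfigure one clock-wizard output at runtime */
static int example_consumer_probe(struct platform_device *pdev)
{
	struct clk *clkout;
	int ret;

	/* Output clock as wired up in the consumer's device tree (assumed) */
	clkout = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clkout))
		return PTR_ERR(clkout);

	ret = clk_prepare_enable(clkout);
	if (ret)
		return ret;

	/*
	 * Ends up in clk_wzrd_round_rate() and then clk_wzrd_dynamic_reconfig(),
	 * which writes the divider and triggers dynamic reconfiguration.
	 */
	return clk_set_rate(clkout, 100000000);	/* 100 MHz, illustrative */
}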