The current implementation of CPTS initialization and deinitialization
(represented by cpts_register()/cpts_unregister()) is pretty entangled
and has several issues:
- the PTP clock is registered before the spinlock that protects it, and
  before the timecounter and cyclecounter are initialized;
- the CPTS ref_clk is requested using the devm API while cpts_register()
  is called from .ndo_open(), so additional checks are required;
- the CPTS ref_clk is prepared, but never unprepared;
- CPTS is not disabled even when unregistered.

Hence, simplify things and fix the above issues by adding
cpts_create()/cpts_release(), which are to be called from
.probe()/.remove() respectively, and move all static initialization
there. Clean up and update cpts_register()/cpts_unregister() so that
the PTP clock is registered last and unregistered first.

In addition, this change allows cleaning up cpts.h for the case when
CPTS is disabled.

Signed-off-by: Grygorii Strashko <grygorii.strashko@xxxxxx>
---
 drivers/net/ethernet/ti/cpsw.c |  24 ++++----
 drivers/net/ethernet/ti/cpts.c | 125 ++++++++++++++++++++++++++---------------
 drivers/net/ethernet/ti/cpts.h |  26 +++++++--
 3 files changed, 113 insertions(+), 62 deletions(-)

diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 9b900f0..dfd5707 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1406,9 +1406,7 @@ static int cpsw_ndo_open(struct net_device *ndev)
 		if (ret < 0)
 			goto err_cleanup;
 
-		if (cpts_register(cpsw->dev, cpsw->cpts,
-				  cpsw->data.cpts_clock_mult,
-				  cpsw->data.cpts_clock_shift))
+		if (cpts_register(cpsw->cpts))
 			dev_err(priv->dev, "error registering cpts device\n");
 	}
 
@@ -2551,6 +2549,7 @@ static int cpsw_probe(struct platform_device *pdev)
 	struct cpdma_params dma_params;
 	struct cpsw_ale_params ale_params;
 	void __iomem *ss_regs;
+	void __iomem *cpts_regs;
 	struct resource *res, *ss_res;
 	const struct of_device_id *of_id;
 	struct gpio_descs *mode;
@@ -2575,12 +2574,6 @@ static int cpsw_probe(struct platform_device *pdev)
 	priv->dev = &ndev->dev;
 	priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
 	cpsw->rx_packet_max = max(rx_packet_max, 128);
-	cpsw->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL);
-	if (!cpsw->cpts) {
-		dev_err(&pdev->dev, "error allocating cpts\n");
-		ret = -ENOMEM;
-		goto clean_ndev_ret;
-	}
 
 	mode = devm_gpiod_get_array_optional(&pdev->dev, "mode", GPIOD_OUT_LOW);
 	if (IS_ERR(mode)) {
@@ -2669,7 +2662,7 @@ static int cpsw_probe(struct platform_device *pdev)
 	switch (cpsw->version) {
 	case CPSW_VERSION_1:
 		cpsw->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET;
-		cpsw->cpts->reg = ss_regs + CPSW1_CPTS_OFFSET;
+		cpts_regs = ss_regs + CPSW1_CPTS_OFFSET;
 		cpsw->hw_stats = ss_regs + CPSW1_HW_STATS;
 		dma_params.dmaregs = ss_regs + CPSW1_CPDMA_OFFSET;
 		dma_params.txhdp = ss_regs + CPSW1_STATERAM_OFFSET;
@@ -2683,7 +2676,7 @@ static int cpsw_probe(struct platform_device *pdev)
 	case CPSW_VERSION_3:
 	case CPSW_VERSION_4:
 		cpsw->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET;
-		cpsw->cpts->reg = ss_regs + CPSW2_CPTS_OFFSET;
+		cpts_regs = ss_regs + CPSW2_CPTS_OFFSET;
 		cpsw->hw_stats = ss_regs + CPSW2_HW_STATS;
 		dma_params.dmaregs = ss_regs + CPSW2_CPDMA_OFFSET;
 		dma_params.txhdp = ss_regs + CPSW2_STATERAM_OFFSET;
@@ -2749,6 +2742,14 @@ static int cpsw_probe(struct platform_device *pdev)
 		goto clean_dma_ret;
 	}
 
+	cpsw->cpts = cpts_create(cpsw->dev, cpts_regs,
+				 cpsw->data.cpts_clock_mult,
+				 cpsw->data.cpts_clock_shift);
+	if (IS_ERR(cpsw->cpts)) {
+		ret = PTR_ERR(cpsw->cpts);
+		goto clean_ale_ret;
+	}
+
 	ndev->irq = platform_get_irq(pdev, 1);
 	if (ndev->irq < 0) {
 		dev_err(priv->dev, "error getting irq resource\n");
@@ -2857,6 +2858,7 @@ static int cpsw_remove(struct platform_device *pdev)
 		unregister_netdev(cpsw->slaves[1].ndev);
 	unregister_netdev(ndev);
 
+	cpts_release(cpsw->cpts);
 	cpsw_ale_destroy(cpsw->ale);
 	cpdma_ctlr_destroy(cpsw->dma);
 	of_platform_depopulate(&pdev->dev);
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index aaab08e..a46478e 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -228,22 +228,6 @@ static void cpts_overflow_check(struct work_struct *work)
 	schedule_delayed_work(&cpts->overflow_work, CPTS_OVERFLOW_PERIOD);
 }
 
-static void cpts_clk_init(struct device *dev, struct cpts *cpts)
-{
-	cpts->refclk = devm_clk_get(dev, "cpts");
-	if (IS_ERR(cpts->refclk)) {
-		dev_err(dev, "Failed to get cpts refclk\n");
-		cpts->refclk = NULL;
-		return;
-	}
-	clk_prepare_enable(cpts->refclk);
-}
-
-static void cpts_clk_release(struct cpts *cpts)
-{
-	clk_disable(cpts->refclk);
-}
-
 static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
 		      u16 ts_seqid, u8 ts_msgtype)
 {
@@ -323,7 +307,7 @@ void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb)
 	u64 ns;
 	struct skb_shared_hwtstamps *ssh;
 
-	if (!cpts->rx_enable)
+	if (!cpts || !cpts->rx_enable)
 		return;
 	ns = cpts_find_ts(cpts, skb, CPTS_EV_RX);
 	if (!ns)
@@ -338,7 +322,7 @@ void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb)
 	u64 ns;
 	struct skb_shared_hwtstamps ssh;
 
-	if (!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
+	if (!cpts || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
 		return;
 	ns = cpts_find_ts(cpts, skb, CPTS_EV_TX);
 	if (!ns)
@@ -348,53 +332,102 @@ void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb)
 	skb_tstamp_tx(skb, &ssh);
 }
 
-int cpts_register(struct device *dev, struct cpts *cpts,
-		  u32 mult, u32 shift)
+int cpts_register(struct cpts *cpts)
 {
 	int err, i;
-	unsigned long flags;
 
-	cpts->info = cpts_info;
-	cpts->clock = ptp_clock_register(&cpts->info, dev);
-	if (IS_ERR(cpts->clock)) {
-		err = PTR_ERR(cpts->clock);
-		cpts->clock = NULL;
-		return err;
-	}
-	spin_lock_init(&cpts->lock);
-
-	cpts->cc.read = cpts_systim_read;
-	cpts->cc.mask = CLOCKSOURCE_MASK(32);
-	cpts->cc_mult = mult;
-	cpts->cc.mult = mult;
-	cpts->cc.shift = shift;
+	if (!cpts)
+		return -EINVAL;
 
 	INIT_LIST_HEAD(&cpts->events);
 	INIT_LIST_HEAD(&cpts->pool);
 	for (i = 0; i < CPTS_MAX_EVENTS; i++)
 		list_add(&cpts->pool_data[i].list, &cpts->pool);
 
-	cpts_clk_init(dev, cpts);
+	clk_enable(cpts->refclk);
+
 	cpts_write32(cpts, CPTS_EN, control);
 	cpts_write32(cpts, TS_PEND_EN, int_enable);
 
-	spin_lock_irqsave(&cpts->lock, flags);
+	cpts->cc.mult = cpts->cc_mult;
 	timecounter_init(&cpts->tc, &cpts->cc, ktime_to_ns(ktime_get_real()));
-	spin_unlock_irqrestore(&cpts->lock, flags);
-
-	INIT_DELAYED_WORK(&cpts->overflow_work, cpts_overflow_check);
-	schedule_delayed_work(&cpts->overflow_work, CPTS_OVERFLOW_PERIOD);
 
+	cpts->info = cpts_info;
+	cpts->clock = ptp_clock_register(&cpts->info, cpts->dev);
+	if (IS_ERR(cpts->clock)) {
+		err = PTR_ERR(cpts->clock);
+		cpts->clock = NULL;
+		goto err_ptp;
+	}
 	cpts->phc_index = ptp_clock_index(cpts->clock);
+
+	schedule_delayed_work(&cpts->overflow_work, CPTS_OVERFLOW_PERIOD);
 	return 0;
+
+err_ptp:
+	clk_disable(cpts->refclk);
+	return err;
 }
 
 void cpts_unregister(struct cpts *cpts)
 {
-	if (cpts->clock) {
-		ptp_clock_unregister(cpts->clock);
-		cancel_delayed_work_sync(&cpts->overflow_work);
+	if (!cpts)
+		return;
+
+	if (WARN_ON(!cpts->clock))
+		return;
+
+	cancel_delayed_work_sync(&cpts->overflow_work);
+
+	ptp_clock_unregister(cpts->clock);
+	cpts->clock = NULL;
+
+	cpts_write32(cpts, 0, int_enable);
+	cpts_write32(cpts, 0, control);
+
+	clk_disable(cpts->refclk);
+}
+
+struct cpts *cpts_create(struct device *dev, void __iomem *regs,
+			 u32 mult, u32 shift)
+{
+	struct cpts *cpts;
+
+	if (!regs || !dev)
+		return ERR_PTR(-EINVAL);
+
+	cpts = devm_kzalloc(dev, sizeof(*cpts), GFP_KERNEL);
+	if (!cpts)
+		return ERR_PTR(-ENOMEM);
+
+	cpts->dev = dev;
+	cpts->reg = (struct cpsw_cpts __iomem *)regs;
+	spin_lock_init(&cpts->lock);
+	INIT_DELAYED_WORK(&cpts->overflow_work, cpts_overflow_check);
+
+	cpts->refclk = devm_clk_get(dev, "cpts");
+	if (IS_ERR(cpts->refclk)) {
+		dev_err(dev, "Failed to get cpts refclk\n");
+		return ERR_PTR(PTR_ERR(cpts->refclk));
 	}
-	if (cpts->refclk)
-		cpts_clk_release(cpts);
+
+	clk_prepare(cpts->refclk);
+
+	cpts->cc.read = cpts_systim_read;
+	cpts->cc.mask = CLOCKSOURCE_MASK(32);
+	cpts->cc.shift = shift;
+	cpts->cc_mult = mult;
+
+	return cpts;
+}
+
+void cpts_release(struct cpts *cpts)
+{
+	if (!cpts)
+		return;
+
+	if (!cpts->refclk)
+		return;
+
+	clk_unprepare(cpts->refclk);
 }
diff --git a/drivers/net/ethernet/ti/cpts.h b/drivers/net/ethernet/ti/cpts.h
index fec753c..0c02f48 100644
--- a/drivers/net/ethernet/ti/cpts.h
+++ b/drivers/net/ethernet/ti/cpts.h
@@ -20,6 +20,8 @@
 #ifndef _TI_CPTS_H_
 #define _TI_CPTS_H_
 
+#ifdef CONFIG_TI_CPTS
+
 #include <linux/clk.h>
 #include <linux/clkdev.h>
 #include <linux/clocksource.h>
@@ -108,10 +110,10 @@ struct cpts_event {
 };
 
 struct cpts {
+	struct device *dev;
 	struct cpsw_cpts __iomem *reg;
 	int tx_enable;
 	int rx_enable;
-#ifdef CONFIG_TI_CPTS
 	struct ptp_clock_info info;
 	struct ptp_clock *clock;
 	spinlock_t lock; /* protects time registers */
@@ -124,14 +126,15 @@ struct cpts {
 	struct list_head events;
 	struct list_head pool;
 	struct cpts_event pool_data[CPTS_MAX_EVENTS];
-#endif
 };
 
-#ifdef CONFIG_TI_CPTS
 void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb);
 void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb);
-int cpts_register(struct device *dev, struct cpts *cpts, u32 mult, u32 shift);
+int cpts_register(struct cpts *cpts);
 void cpts_unregister(struct cpts *cpts);
+struct cpts *cpts_create(struct device *dev, void __iomem *regs,
+			 u32 mult, u32 shift);
+void cpts_release(struct cpts *cpts);
 
 static inline void cpts_rx_enable(struct cpts *cpts, int enable)
 {
@@ -156,6 +159,8 @@ static inline bool cpts_is_tx_enabled(struct cpts *cpts)
 }
 
 #else
+struct cpts;
+
 static inline void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb)
 {
 }
@@ -163,8 +168,19 @@ static inline void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb)
 {
 }
 
+static inline
+struct cpts *cpts_create(struct device *dev, void __iomem *regs,
+			 u32 mult, u32 shift)
+{
+	return NULL;
+}
+
+static inline void cpts_release(struct cpts *cpts)
+{
+}
+
 static inline int
-cpts_register(struct device *dev, struct cpts *cpts, u32 mult, u32 shift)
+cpts_register(struct cpts *cpts)
 {
 	return 0;
 }
-- 
2.9.3
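
For reviewers, a minimal usage sketch of the reworked API follows. The foo_*
skeleton, its parameters and the foo_priv structure are made up for
illustration and are not part of this patch; only the cpts_*() calls reflect
the new interface.

/* Usage sketch only -- not part of this patch.  foo_* is a made-up driver
 * skeleton; the cpts_*() calls show the intended split between one-time
 * setup in .probe()/.remove() and start/stop in .ndo_open()/.ndo_stop().
 */
#include <linux/err.h>
#include "cpts.h"

struct foo_priv {
	struct cpts *cpts;
};

static int foo_probe(struct device *dev, struct foo_priv *priv,
		     void __iomem *cpts_regs, u32 mult, u32 shift)
{
	/* .probe(): static init only - devm alloc, refclk get + clk_prepare() */
	priv->cpts = cpts_create(dev, cpts_regs, mult, shift);
	return IS_ERR(priv->cpts) ? PTR_ERR(priv->cpts) : 0;
}

static int foo_open(struct foo_priv *priv)
{
	/* .ndo_open(): clk_enable(), CPTS enabled, PTP clock registered last */
	return cpts_register(priv->cpts);
}

static void foo_stop(struct foo_priv *priv)
{
	/* .ndo_stop(): PTP clock unregistered first, then CPTS and refclk off */
	cpts_unregister(priv->cpts);
}

static void foo_remove(struct foo_priv *priv)
{
	/* .remove(): clk_unprepare(); the cpts struct itself is devm-managed */
	cpts_release(priv->cpts);
}

With this split, a failed open/close cycle no longer leaves the refclk
prepared forever, and user space can only see the PTP clock after the
timecounter, cyclecounter and spinlock are fully set up.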