On Wed, Apr 26, 2023 at 11:54:29AM -0700, Justin Chen wrote:
> Add support for the Broadcom ASP 2.0 Ethernet controller which is first
> introduced with 72165. This controller features two distinct Ethernet
> ports that can be independently operated.
>
> This patch supports:
>
> - Wake-on-LAN using magic packets
> - basic ethtool operations (link, counters, message level)
> - MAC destination address filtering (promiscuous, ALL_MULTI, etc.)
>
> Signed-off-by: Florian Fainelli <f.fainelli@xxxxxxxxx>
> Signed-off-by: Justin Chen <justinpopo6@xxxxxxxxx>

...

> diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.c b/drivers/net/ethernet/broadcom/asp2/bcmasp.c

...

> +static int bcmasp_netfilt_get_reg_offset(struct bcmasp_priv *priv,
> +                                         struct bcmasp_net_filter *nfilt,
> +                                         enum asp_netfilt_reg_type reg_type,
> +                                         u32 offset)
> +{
> +        u32 block_index, filter_sel;
> +
> +        if (offset < 32) {
> +                block_index = ASP_RX_FILTER_NET_L2;
> +                filter_sel = nfilt->hw_index;
> +        } else if (offset < 64) {
> +                block_index = ASP_RX_FILTER_NET_L2;
> +                filter_sel = nfilt->hw_index + 1;
> +        } else if (offset < 96) {
> +                block_index = ASP_RX_FILTER_NET_L3_0;
> +                filter_sel = nfilt->hw_index;
> +        } else if (offset < 128) {
> +                block_index = ASP_RX_FILTER_NET_L3_0;
> +                filter_sel = nfilt->hw_index + 1;
> +        } else if (offset < 160) {
> +                block_index = ASP_RX_FILTER_NET_L3_1;
> +                filter_sel = nfilt->hw_index;
> +        } else if (offset < 192) {
> +                block_index = ASP_RX_FILTER_NET_L3_1;
> +                filter_sel = nfilt->hw_index + 1;
> +        } else if (offset < 224) {
> +                block_index = ASP_RX_FILTER_NET_L4;
> +                filter_sel = nfilt->hw_index;
> +        } else if (offset < 256) {
> +                block_index = ASP_RX_FILTER_NET_L4;
> +                filter_sel = nfilt->hw_index + 1;
> +        }

block_index and filter_sel are uninitialised if offset doesn't match
any of the conditions above. Can that happen?

> +
> +        switch (reg_type) {
> +        case ASP_NETFILT_MATCH:
> +                return ASP_RX_FILTER_NET_PAT(filter_sel, block_index,
> +                                             (offset % 32));
> +        case ASP_NETFILT_MASK:
> +                return ASP_RX_FILTER_NET_MASK(filter_sel, block_index,
> +                                              (offset % 32));
> +        default:
> +                return -EINVAL;
> +        }
> +}

...

> +static void bcmasp_netfilt_tcpip4_wr(struct bcmasp_priv *priv,
> +                                     struct bcmasp_net_filter *nfilt,
> +                                     struct ethtool_tcpip4_spec *match,
> +                                     struct ethtool_tcpip4_spec *mask,
> +                                     u32 offset)
> +{
> +        __be16 val_16, mask_16;
> +
> +        val_16 = htons(ETH_P_IP);
> +        mask_16 = 0xFFFF;

mask_16 is __be16, but 0xFFFF is host byte order.
Please make sure there are no new warnings when building with W=1 C=1.

...

> +/* If no network filter found, return open filter.
> + * If no more open filters return NULL
> + */
> +struct bcmasp_net_filter *bcmasp_netfilt_get_init(struct bcmasp_intf *intf,
> +                                                  int loc, bool wake_filter,
> +                                                  bool init)
> +{
> +        struct bcmasp_priv *priv = intf->parent;
> +        struct bcmasp_net_filter *nfilter = NULL;
> +        int i, open_index = -1;

Please use reverse xmas tree - longest line to shortest - for local
variable declarations in networking code. You can check for this
using https://github.com/ecree-solarflare/xmastree
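Something like this, for example (untested, just to illustrate the
ordering of the declarations quoted above):

        struct bcmasp_net_filter *nfilter = NULL;
        struct bcmasp_priv *priv = intf->parent;
        int i, open_index = -1;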
...

> +static int bcmasp_combine_set_filter(struct bcmasp_intf *intf,
> +                                     unsigned char *addr, unsigned char *mask,
> +                                     int i)
> +{
> +        u64 addr1, addr2, mask1, mask2, mask3;
> +        struct bcmasp_priv *priv = intf->parent;
> +
> +        /* Switch to u64 to help with the calculations */
> +        addr1 = ether_addr_to_u64(priv->mda_filters[i].addr);
> +        mask1 = ether_addr_to_u64(priv->mda_filters[i].mask);
> +        addr2 = ether_addr_to_u64(addr);
> +        mask2 = ether_addr_to_u64(mask);
> +
> +        /* Check if one filter resides within the other */
> +        mask3 = mask1 & mask2;
> +        if (mask3 == mask1 && ((addr1 & mask1) == (addr2 & mask1))) {
> +                /* Filter 2 resides within fitler 1, so everthing is good */

nit: s/fitler/filter/

Please consider running ./scripts/checkpatch.pl --codespell

...

> +static void bcmasp_update_mib_counters(struct bcmasp_intf *priv)
> +{
> +        int i, j = 0;
> +
> +        for (i = 0; i < BCMASP_STATS_LEN; i++) {
> +                const struct bcmasp_stats *s;
> +                u16 offset = 0;
> +                u32 val = 0;
> +                char *p;
> +
> +                s = &bcmasp_gstrings_stats[i];
> +                switch (s->type) {
> +                case BCMASP_STAT_NETDEV:
> +                case BCMASP_STAT_SOFT:
> +                        continue;
> +                case BCMASP_STAT_RUNT:
> +                        offset += BCMASP_STAT_OFFSET;
> +                        fallthrough;
> +                case BCMASP_STAT_MIB_TX:
> +                        offset += BCMASP_STAT_OFFSET;
> +                        fallthrough;
> +                case BCMASP_STAT_MIB_RX:
> +                        val = umac_rl(priv, UMC_MIB_START + j + offset);
> +                        offset = 0;     /* Reset Offset */
> +                        break;
> +                case BCMASP_STAT_RX_EDPKT:
> +                        val = rx_edpkt_core_rl(priv->parent, s->reg_offset);
> +                        break;
> +                case BCMASP_STAT_RX_CTRL:
> +                        offset = bcmasp_stat_fixup_offset(priv, s);
> +                        if (offset != ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT)
> +                                offset += sizeof(u32) * priv->port;
> +                        val = rx_ctrl_core_rl(priv->parent, offset);
> +                        break;
> +                }
> +
> +                j += s->stat_sizeof;
> +                p = (char *)priv + s->stat_offset;
> +                *(u32 *)p = val;

Is p always 32bit aligned?

> +        }
> +}
> +
> +static void bcmasp_get_ethtool_stats(struct net_device *dev,
> +                                     struct ethtool_stats *stats,
> +                                     u64 *data)
> +{
> +        struct bcmasp_intf *priv = netdev_priv(dev);
> +        int i, j = 0;
> +
> +        if (netif_running(dev))
> +                bcmasp_update_mib_counters(priv);
> +
> +        dev->netdev_ops->ndo_get_stats(dev);
> +
> +        for (i = 0; i < BCMASP_STATS_LEN; i++) {
> +                const struct bcmasp_stats *s;
> +                char *p;
> +
> +                s = &bcmasp_gstrings_stats[i];
> +                if (!bcmasp_stat_available(priv, s->type))
> +                        continue;
> +                if (s->type == BCMASP_STAT_NETDEV)
> +                        p = (char *)&dev->stats;
> +                else
> +                        p = (char *)priv;
> +                p += s->stat_offset;
> +                if (sizeof(unsigned long) != sizeof(u32) &&
> +                    s->stat_sizeof == sizeof(unsigned long))
> +                        data[j] = *(unsigned long *)p;
> +                else
> +                        data[j] = *(u32 *)p;

Maybe memcpy would make this a little easier to read.
A rough sketch of what I have in mind is at the end of this mail.

> +                j++;
> +        }
> +}

...

> diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c

...

> +static int bcmasp_init_rx(struct bcmasp_intf *intf)
> +{
> +        struct device *kdev = &intf->parent->pdev->dev;
> +        struct net_device *ndev = intf->ndev;
> +        void *p;
> +        dma_addr_t dma;
> +        struct page *buffer_pg;
> +        u32 reg;
> +        int ret;
> +
> +        intf->rx_buf_order = get_order(RING_BUFFER_SIZE);
> +        buffer_pg = alloc_pages(GFP_KERNEL, intf->rx_buf_order);
> +
> +        dma = dma_map_page(kdev, buffer_pg, 0, RING_BUFFER_SIZE,
> +                           DMA_FROM_DEVICE);
> +        if (dma_mapping_error(kdev, dma)) {
> +                netdev_err(ndev, "Cannot allocate RX buffer\n");

I think the core will log an error on allocation failure,
so the message above is not needed.
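FWIW, a rough sketch of what I mean, entirely untested (note that I have
also assumed a NULL check after alloc_pages(), which I don't see in the
quoted hunk):

        buffer_pg = alloc_pages(GFP_KERNEL, intf->rx_buf_order);
        if (!buffer_pg)
                return -ENOMEM; /* page allocator already warns on failure */

        dma = dma_map_page(kdev, buffer_pg, 0, RING_BUFFER_SIZE,
                           DMA_FROM_DEVICE);
        if (dma_mapping_error(kdev, dma)) {
                __free_pages(buffer_pg, intf->rx_buf_order);
                return -ENOMEM;
        }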
> +                __free_pages(buffer_pg, intf->rx_buf_order);
> +                return -ENOMEM;
> +        }

...
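Coming back to the stats copy in bcmasp_get_ethtool_stats() above, here
is the rough memcpy variant I had in mind (untested, purely illustrative;
it also avoids dereferencing p at a possibly unaligned address):

                if (s->stat_sizeof == sizeof(unsigned long)) {
                        unsigned long val_ul;

                        /* copy byte-wise, no aligned load needed */
                        memcpy(&val_ul, p, sizeof(val_ul));
                        data[j] = val_ul;
                } else {
                        u32 val_u32;

                        memcpy(&val_u32, p, sizeof(val_u32));
                        data[j] = val_u32;
                }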