Search Linux Wireless

Re: [PATCH v2 09/14] net: wwan: t7xx: Add WWAN network interface

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



On Mon, Nov 1, 2021 at 6:57 AM Ricardo Martinez
<ricardo.martinez@xxxxxxxxxxxxxxx> wrote:
> Creates the Cross Core Modem Network Interface (CCMNI) which implements
> the wwan_ops for registration with the WWAN framework, CCMNI also
> implements the net_device_ops functions used by the network device.
> Network device operations include open, close, start transmission, TX
> timeout, change MTU, and select queue.

[skipped]

> diff --git a/drivers/net/wwan/t7xx/t7xx_netdev.c b/drivers/net/wwan/t7xx/t7xx_netdev.c
> ...
> +static void ccmni_make_etherframe(struct net_device *dev, void *skb_eth_hdr,
> +                                 u8 *mac_addr, unsigned int packet_type)
> +{
> +       struct ethhdr *eth_hdr;
> +
> +       eth_hdr = skb_eth_hdr;
> +       memcpy(eth_hdr->h_dest, mac_addr, sizeof(eth_hdr->h_dest));
> +       memset(eth_hdr->h_source, 0, sizeof(eth_hdr->h_source));
> +
> +       if (packet_type == IPV6_VERSION)
> +               eth_hdr->h_proto = cpu_to_be16(ETH_P_IPV6);
> +       else
> +               eth_hdr->h_proto = cpu_to_be16(ETH_P_IP);
> +}

If the modem is a pure IP device, you do not need to forge an Ethernet
header. Moreover, doing so does not make any sense; it only wastes CPU
time on every packet. Just set netdev->type to ARPHRD_NONE and send a
pure IPv4/IPv6 packet up to the stack.

> +static enum txq_type get_txq_type(struct sk_buff *skb)
> +{
> +       u32 total_len, payload_len, l4_off;
> +       bool tcp_syn_fin_rst, is_tcp;
> +       struct ipv6hdr *ip6h;
> +       struct tcphdr *tcph;
> +       struct iphdr *ip4h;
> +       u32 packet_type;
> +       __be16 frag_off;
> +
> +       packet_type = skb->data[0] & SBD_PACKET_TYPE_MASK;
> +       if (packet_type == IPV6_VERSION) {
> +               ip6h = (struct ipv6hdr *)skb->data;
> +               total_len = sizeof(struct ipv6hdr) + ntohs(ip6h->payload_len);
> +               l4_off = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &ip6h->nexthdr, &frag_off);
> +               tcph = (struct tcphdr *)(skb->data + l4_off);
> +               is_tcp = ip6h->nexthdr == IPPROTO_TCP;
> +               payload_len = total_len - l4_off - (tcph->doff << 2);
> +       } else if (packet_type == IPV4_VERSION) {
> +               ip4h = (struct iphdr *)skb->data;
> +               tcph = (struct tcphdr *)(skb->data + (ip4h->ihl << 2));
> +               is_tcp = ip4h->protocol == IPPROTO_TCP;
> +               payload_len = ntohs(ip4h->tot_len) - (ip4h->ihl << 2) - (tcph->doff << 2);
> +       } else {
> +               return TXQ_NORMAL;
> +       }
> +
> +       tcp_syn_fin_rst = tcph->syn || tcph->fin || tcph->rst;
> +       if (is_tcp && !payload_len && !tcp_syn_fin_rst)
> +               return TXQ_FAST;
> +
> +       return TXQ_NORMAL;
> +}

I am wondering how much the modem performance improves with this
optimization, compared to the per-packet performance loss caused by
the cache miss. Do you have any measurement results?

> +static u16 ccmni_select_queue(struct net_device *dev, struct sk_buff *skb,
> +                             struct net_device *sb_dev)
> +{
> +       struct ccmni_instance *ccmni;
> +
> +       ccmni = netdev_priv(dev);
> +
> +       if (ccmni->ctlb->capability & NIC_CAP_DATA_ACK_DVD)
> +               return get_txq_type(skb);
> +
> +       return TXQ_NORMAL;
> +}
> +
> +static int ccmni_open(struct net_device *dev)
> +{
> +       struct ccmni_instance *ccmni;
> +
> +       ccmni = wwan_netdev_drvpriv(dev);

Move this assignment to the variable definition.

> +       netif_carrier_on(dev);
> +       netif_tx_start_all_queues(dev);
> +       atomic_inc(&ccmni->usage);
> +       return 0;
> +}
> +
> +static int ccmni_close(struct net_device *dev)
> +{
> +       struct ccmni_instance *ccmni;
> +
> +       ccmni = wwan_netdev_drvpriv(dev);

Same here.

> +       if (atomic_dec_return(&ccmni->usage) < 0)
> +               return -EINVAL;
> +
> +       netif_carrier_off(dev);
> +       netif_tx_disable(dev);
> +       return 0;
> +}
> +
> +static int ccmni_send_packet(struct ccmni_instance *ccmni, struct sk_buff *skb, enum txq_type txqt)
> +{
> +       struct ccmni_ctl_block *ctlb;
> +       struct ccci_header *ccci_h;
> +       unsigned int ccmni_idx;
> +
> +       skb_push(skb, sizeof(struct ccci_header));
> +       ccci_h = (struct ccci_header *)skb->data;
> +       ccci_h->status &= ~HDR_FLD_CHN;

Please do not push control data into the skb data. You will remove it
anyway when enqueuing to the HW. This approach causes a performance
penalty. Also, this looks like an abuse of the ccci_header structure.

Use a dedicated structure and the skb control buffer (e.g. skb->cb) to
preserve control data while the packet stays in an intermediate queue.

> +       ccmni_idx = ccmni->index;
> +       ccci_h->data[0] = ccmni_idx;
> +       ccci_h->data[1] = skb->len;
> +       ccci_h->reserved = 0;
> +
> +       ctlb = ccmni->ctlb;
> +       if (dpmaif_tx_send_skb(ctlb->hif_ctrl, txqt, skb)) {
> +               skb_pull(skb, sizeof(struct ccci_header));
> +               /* we will reserve header again in the next retry */
> +               return NETDEV_TX_BUSY;
> +       }
> +
> +       return 0;
> +}
> +
> +static int ccmni_start_xmit(struct sk_buff *skb, struct net_device *dev)
> +{
> +       struct ccmni_instance *ccmni;
> +       struct ccmni_ctl_block *ctlb;
> +       enum txq_type txqt;
> +       int skb_len;
> +
> +       ccmni = wwan_netdev_drvpriv(dev);

Move assignment to the variable definition.

> +       ctlb = ccmni->ctlb;
> +       txqt = TXQ_NORMAL;
> +       skb_len = skb->len;
> +
> +       /* If MTU changed or there is no headroom, drop the packet */
> +       if (skb->len > dev->mtu || skb_headroom(skb) < sizeof(struct ccci_header)) {
> +               dev_kfree_skb(skb);
> +               dev->stats.tx_dropped++;
> +               return NETDEV_TX_OK;
> +       }
> +
> +       if (ctlb->capability & NIC_CAP_DATA_ACK_DVD)
> +               txqt = get_txq_type(skb);
> +
> +       if (ccmni_send_packet(ccmni, skb, txqt)) {
> +               if (!(ctlb->capability & NIC_CAP_TXBUSY_STOP)) {
> +                       if ((ccmni->tx_busy_cnt[txqt]++) % 100 == 0)
> +                               netdev_notice(dev, "[TX]CCMNI:%d busy:pkt=%ld(ack=%d) cnt=%ld\n",
> +                                             ccmni->index, dev->stats.tx_packets,
> +                                             txqt, ccmni->tx_busy_cnt[txqt]);

What is the purpose of this message?

> +               } else {
> +                       ccmni->tx_busy_cnt[txqt]++;
> +               }
> +
> +               return NETDEV_TX_BUSY;
> +       }
> +
> +       dev->stats.tx_packets++;
> +       dev->stats.tx_bytes += skb_len;
> +       if (ccmni->tx_busy_cnt[txqt] > 10) {
> +               netdev_notice(dev, "[TX]CCMNI:%d TX busy:tx_pkt=%ld(ack=%d) retries=%ld\n",
> +                             ccmni->index, dev->stats.tx_packets,
> +                             txqt, ccmni->tx_busy_cnt[txqt]);
> +       }
> +       ccmni->tx_busy_cnt[txqt] = 0;
> +
> +       return NETDEV_TX_OK;
> +}
> +
> +static int ccmni_change_mtu(struct net_device *dev, int new_mtu)
> +{
> +       if (new_mtu > CCMNI_MTU_MAX)
> +               return -EINVAL;
> +
> +       dev->mtu = new_mtu;
> +       return 0;
> +}

You do not need this function at all. You already specify the max_mtu
value in ccmni_wwan_setup(), so the network core code will be happy to
check a user-requested MTU against max_mtu for you.

> ...
> +static void ccmni_pre_stop(struct ccmni_ctl_block *ctlb)
> +{
> ...
> +}
> +
> +static void ccmni_pos_stop(struct ccmni_ctl_block *ctlb)

Please consider renaming this function to ccmni_post_stop(). On a
first reading of the code, it is quite hard to figure out that "pos"
means "post" rather than some position that should be stopped.

> ...
> +static void ccmni_wwan_setup(struct net_device *dev)
> +{
> +       dev->header_ops = NULL;
> +       dev->hard_header_len += sizeof(struct ccci_header);
> +
> +       dev->mtu = WWAN_DEFAULT_MTU;
> +       dev->max_mtu = CCMNI_MTU_MAX;
> +       dev->tx_queue_len = CCMNI_TX_QUEUE;
> +       dev->watchdog_timeo = CCMNI_NETDEV_WDT_TO;
> +       /* ccmni is a pure IP device */
> +       dev->flags = (IFF_POINTOPOINT | IFF_NOARP)
> +                    & ~(IFF_BROADCAST | IFF_MULTICAST);

You do not need to reset flags on the initial assignment. Just

        dev->flags = IFF_POINTOPOINT | IFF_NOARP;

would be enough.

> +       /* not supporting VLAN */
> +       dev->features = NETIF_F_VLAN_CHALLENGED;
> +
> +       dev->features |= NETIF_F_SG;
> +       dev->hw_features |= NETIF_F_SG;
> +
> +       /* uplink checksum offload */
> +       dev->features |= NETIF_F_HW_CSUM;
> +       dev->hw_features |= NETIF_F_HW_CSUM;
> +
> +       /* downlink checksum offload */
> +       dev->features |= NETIF_F_RXCSUM;
> +       dev->hw_features |= NETIF_F_RXCSUM;
> +
> +       dev->addr_len = ETH_ALEN;

You do not need to configure HW address length as the modem is a pure
IP device. Just drop the above line or explicitly set address length
to zero.

> +       /* use kernel default free_netdev() function */
> +       dev->needs_free_netdev = true;
> +
> +       /* no need to free again because of free_netdev() */
> +       dev->priv_destructor = NULL;
> +       dev->type = ARPHRD_PPP;

Use ARPHRD_NONE here since the modem is a pure IP device. Or you could
use ARPHRD_RAWIP depending on how you would like to allocate the link
IPv6 address. If in doubt then ARPHRD_NONE is a good starting point.

> +       dev->netdev_ops = &ccmni_netdev_ops;
> +       eth_random_addr(dev->dev_addr);

You do not need this random address generation.

> +}
> ...
> +static void ccmni_recv_skb(struct mtk_pci_dev *mtk_dev, int netif_id, struct sk_buff *skb)
> +{
> ...
> +       pkt_type = skb->data[0] & SBD_PACKET_TYPE_MASK;
> +       ccmni_make_etherframe(dev, skb->data - ETH_HLEN, dev->dev_addr, pkt_type);

As I wrote above, you do not need to forge an Ethernet header for pure
IP devices.

> +       skb_set_mac_header(skb, -ETH_HLEN);
> +       skb_reset_network_header(skb);
> +       skb->dev = dev;
> +       if (pkt_type == IPV6_VERSION)
> +               skb->protocol = htons(ETH_P_IPV6);
> +       else
> +               skb->protocol = htons(ETH_P_IP);
> +
> +       skb_len = skb->len;
> +
> +       netif_rx_any_context(skb);

Did you consider using NAPI for the packet Rx path? This should
improve Rx performance.

> +       dev->stats.rx_packets++;
> +       dev->stats.rx_bytes += skb_len;
> +}

[skipped]

> diff --git a/drivers/net/wwan/t7xx/t7xx_netdev.h b/drivers/net/wwan/t7xx/t7xx_netdev.h
> ...
> +#define CCMNI_TX_QUEUE         1000

Is this a really carefully selected queue depth limit, or just an
arbitrary value? If the latter, then feel free to use the
DEFAULT_TX_QUEUE_LEN macro.

> ..
> +#define IPV4_VERSION           0x40
> +#define IPV6_VERSION           0x60

Just curious why the _VERSION suffix? Why not, for example, PKT_TYPE_ prefix?

--
Sergey



[Index of Archives]     [Linux Host AP]     [ATH6KL]     [Linux Wireless Personal Area Network]     [Linux Bluetooth]     [Wireless Regulations]     [Linux Netdev]     [Kernel Newbies]     [Linux Kernel]     [IDE]     [Git]     [Netfilter]     [Bugtraq]     [Yosemite Hiking]     [MIPS Linux]     [ARM Linux]     [Linux RAID]

  Powered by Linux