Re: [PATCH net-next v5 4/9] virtio_net: support device stats

On Mon, Apr 15, 2024 at 10:51 AM Xuan Zhuo <xuanzhuo@xxxxxxxxxxxxxxxxx> wrote:
>
> On Thu, 11 Apr 2024 14:09:24 +0800, Jason Wang <jasowang@xxxxxxxxxx> wrote:
> > On Wed, Apr 10, 2024 at 6:55 PM Xuan Zhuo <xuanzhuo@xxxxxxxxxxxxxxxxx> wrote:
> > >
> > > On Wed, 10 Apr 2024 14:09:23 +0800, Jason Wang <jasowang@xxxxxxxxxx> wrote:
> > > > On Mon, Mar 18, 2024 at 7:06 PM Xuan Zhuo <xuanzhuo@xxxxxxxxxxxxxxxxx> wrote:
> > > > >
> > > > > As per the spec change
> > > > > https://github.com/oasis-tcs/virtio-spec/commit/42f389989823039724f95bbbd243291ab0064f82,
> > > > >
> > > > > make virtio-net support getting the stats from the device via ethtool -S
> > > > > <eth0>.
> > > > >
> > > > > Due to the numerous stats descriptors, an organization method is
> > > > > required. For this purpose, I have introduced the "virtnet_stats_map".
> > > > > Utilizing this array simplifies coding tasks such as generating field
> > > > > names, calculating buffer sizes for requests and responses, and parsing
> > > > > replies from the device. By iterating over the "virtnet_stats_map,"
> > > > > these operations become more streamlined and efficient.
> > > > >
> > > > > NIC statistics:
> > > > >      rx0_packets: 582951
> > > > >      rx0_bytes: 155307077
> > > > >      rx0_drops: 0
> > > > >      rx0_xdp_packets: 0
> > > > >      rx0_xdp_tx: 0
> > > > >      rx0_xdp_redirects: 0
> > > > >      rx0_xdp_drops: 0
> > > > >      rx0_kicks: 17007
> > > > >      rx0_hw_packets: 2179409
> > > > >      rx0_hw_bytes: 510015040
> > > > >      rx0_hw_notifications: 0
> > > > >      rx0_hw_interrupts: 0
> > > > >      rx0_hw_drops: 12964
> > > > >      rx0_hw_drop_overruns: 0
> > > > >      rx0_hw_csum_valid: 2179409
> > > > >      rx0_hw_csum_none: 0
> > > > >      rx0_hw_csum_bad: 0
> > > > >      rx0_hw_needs_csum: 2179409
> > > > >      rx0_hw_ratelimit_packets: 0
> > > > >      rx0_hw_ratelimit_bytes: 0
> > > > >      tx0_packets: 15361
> > > > >      tx0_bytes: 1918970
> > > > >      tx0_xdp_tx: 0
> > > > >      tx0_xdp_tx_drops: 0
> > > > >      tx0_kicks: 15361
> > > > >      tx0_timeouts: 0
> > > > >      tx0_hw_packets: 32272
> > > > >      tx0_hw_bytes: 4311698
> > > > >      tx0_hw_notifications: 0
> > > > >      tx0_hw_interrupts: 0
> > > > >      tx0_hw_drops: 0
> > > > >      tx0_hw_drop_malformed: 0
> > > > >      tx0_hw_csum_none: 0
> > > > >      tx0_hw_needs_csum: 32272
> > > > >      tx0_hw_ratelimit_packets: 0
> > > > >      tx0_hw_ratelimit_bytes: 0
> > > > >
> > > > > Signed-off-by: Xuan Zhuo <xuanzhuo@xxxxxxxxxxxxxxxxx>
> > > > > ---
> > > > >  drivers/net/virtio_net.c | 401 ++++++++++++++++++++++++++++++++++++++-
> > > > >  1 file changed, 397 insertions(+), 4 deletions(-)
> > > > >
> > > > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > > > > index 8cb5bdd7ad91..70c1d4e850e0 100644
> > > > > --- a/drivers/net/virtio_net.c
> > > > > +++ b/drivers/net/virtio_net.c
> > > > > @@ -128,6 +128,129 @@ static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
> > > > >  #define VIRTNET_SQ_STATS_LEN   ARRAY_SIZE(virtnet_sq_stats_desc)
> > > > >  #define VIRTNET_RQ_STATS_LEN   ARRAY_SIZE(virtnet_rq_stats_desc)
> > > > >
> > > > > +#define VIRTNET_STATS_DESC_CQ(name) \
> > > > > +       {#name, offsetof(struct virtio_net_stats_cvq, name)}
> > > > > +
> > > > > +#define VIRTNET_STATS_DESC_RX(class, name) \
> > > > > +       {#name, offsetof(struct virtio_net_stats_rx_ ## class, rx_ ## name)}
> > > > > +
> > > > > +#define VIRTNET_STATS_DESC_TX(class, name) \
> > > > > +       {#name, offsetof(struct virtio_net_stats_tx_ ## class, tx_ ## name)}
> > > > > +
> > > > > +static const struct virtnet_stat_desc virtnet_stats_cvq_desc[] = {
> > > > > +       VIRTNET_STATS_DESC_CQ(command_num),
> > > > > +       VIRTNET_STATS_DESC_CQ(ok_num),
> > > > > +};
> > > > > +
> > > > > +static const struct virtnet_stat_desc virtnet_stats_rx_basic_desc[] = {
> > > > > +       VIRTNET_STATS_DESC_RX(basic, packets),
> > > > > +       VIRTNET_STATS_DESC_RX(basic, bytes),
> > > > > +
> > > > > +       VIRTNET_STATS_DESC_RX(basic, notifications),
> > > > > +       VIRTNET_STATS_DESC_RX(basic, interrupts),
> > > > > +
> > > > > +       VIRTNET_STATS_DESC_RX(basic, drops),
> > > > > +       VIRTNET_STATS_DESC_RX(basic, drop_overruns),
> > > > > +};
> > > > > +
> > > > > +static const struct virtnet_stat_desc virtnet_stats_tx_basic_desc[] = {
> > > > > +       VIRTNET_STATS_DESC_TX(basic, packets),
> > > > > +       VIRTNET_STATS_DESC_TX(basic, bytes),
> > > > > +
> > > > > +       VIRTNET_STATS_DESC_TX(basic, notifications),
> > > > > +       VIRTNET_STATS_DESC_TX(basic, interrupts),
> > > > > +
> > > > > +       VIRTNET_STATS_DESC_TX(basic, drops),
> > > > > +       VIRTNET_STATS_DESC_TX(basic, drop_malformed),
> > > > > +};
> > > > > +
> > > > > +static const struct virtnet_stat_desc virtnet_stats_rx_csum_desc[] = {
> > > > > +       VIRTNET_STATS_DESC_RX(csum, csum_valid),
> > > > > +       VIRTNET_STATS_DESC_RX(csum, needs_csum),
> > > > > +
> > > > > +       VIRTNET_STATS_DESC_RX(csum, csum_none),
> > > > > +       VIRTNET_STATS_DESC_RX(csum, csum_bad),
> > > > > +};
> > > > > +
> > > > > +static const struct virtnet_stat_desc virtnet_stats_tx_csum_desc[] = {
> > > > > +       VIRTNET_STATS_DESC_TX(csum, needs_csum),
> > > > > +       VIRTNET_STATS_DESC_TX(csum, csum_none),
> > > > > +};
> > > > > +
> > > > > +static const struct virtnet_stat_desc virtnet_stats_rx_gso_desc[] = {
> > > > > +       VIRTNET_STATS_DESC_RX(gso, gso_packets),
> > > > > +       VIRTNET_STATS_DESC_RX(gso, gso_bytes),
> > > > > +       VIRTNET_STATS_DESC_RX(gso, gso_packets_coalesced),
> > > > > +       VIRTNET_STATS_DESC_RX(gso, gso_bytes_coalesced),
> > > > > +};
> > > > > +
> > > > > +static const struct virtnet_stat_desc virtnet_stats_tx_gso_desc[] = {
> > > > > +       VIRTNET_STATS_DESC_TX(gso, gso_packets),
> > > > > +       VIRTNET_STATS_DESC_TX(gso, gso_bytes),
> > > > > +       VIRTNET_STATS_DESC_TX(gso, gso_segments),
> > > > > +       VIRTNET_STATS_DESC_TX(gso, gso_segments_bytes),
> > > > > +       VIRTNET_STATS_DESC_TX(gso, gso_packets_noseg),
> > > > > +       VIRTNET_STATS_DESC_TX(gso, gso_bytes_noseg),
> > > > > +};
> > > > > +
> > > > > +static const struct virtnet_stat_desc virtnet_stats_rx_speed_desc[] = {
> > > > > +       VIRTNET_STATS_DESC_RX(speed, ratelimit_packets),
> > > > > +       VIRTNET_STATS_DESC_RX(speed, ratelimit_bytes),
> > > > > +};
> > > > > +
> > > > > +static const struct virtnet_stat_desc virtnet_stats_tx_speed_desc[] = {
> > > > > +       VIRTNET_STATS_DESC_TX(speed, ratelimit_packets),
> > > > > +       VIRTNET_STATS_DESC_TX(speed, ratelimit_bytes),
> > > > > +};
> > > > > +
> > > > > +#define VIRTNET_Q_TYPE_RX 0
> > > > > +#define VIRTNET_Q_TYPE_TX 1
> > > > > +#define VIRTNET_Q_TYPE_CQ 2
> > > > > +
> > > > > +struct virtnet_stats_map {
> > > > > +       /* The stat type in bitmap. */
> > > > > +       u64 stat_type;
> > > > > +
> > > > > +       /* The bytes of the response for the stat. */
> > > > > +       u32 len;
> > > > > +
> > > > > +       /* The num of the response fields for the stat. */
> > > > > +       u32 num;
> > > > > +
> > > > > +       /* The type of queue corresponding to the statistics. (cq, rq, sq) */
> > > > > +       u32 queue_type;
> > > > > +
> > > > > +       /* The reply type of the stat. */
> > > > > +       u8 reply_type;
> > > > > +
> > > > > +       /* Describe the name and the offset in the response. */
> > > > > +       const struct virtnet_stat_desc *desc;
> > > > > +};
> > > > > +
> > > > > +#define VIRTNET_DEVICE_STATS_MAP_ITEM(TYPE, type, queue_type)  \
> > > > > +       {                                                       \
> > > > > +               VIRTIO_NET_STATS_TYPE_##TYPE,                   \
> > > > > +               sizeof(struct virtio_net_stats_ ## type),       \
> > > > > +               ARRAY_SIZE(virtnet_stats_ ## type ##_desc),     \
> > > > > +               VIRTNET_Q_TYPE_##queue_type,                    \
> > > > > +               VIRTIO_NET_STATS_TYPE_REPLY_##TYPE,             \
> > > > > +               &virtnet_stats_##type##_desc[0]                 \
> > > > > +       }
> > > > > +
> > > > > +static struct virtnet_stats_map virtio_net_stats_map[] = {
> > > > > +       VIRTNET_DEVICE_STATS_MAP_ITEM(CVQ, cvq, CQ),
> > > > > +
> > > > > +       VIRTNET_DEVICE_STATS_MAP_ITEM(RX_BASIC, rx_basic, RX),
> > > > > +       VIRTNET_DEVICE_STATS_MAP_ITEM(RX_CSUM,  rx_csum,  RX),
> > > > > +       VIRTNET_DEVICE_STATS_MAP_ITEM(RX_GSO,   rx_gso,   RX),
> > > > > +       VIRTNET_DEVICE_STATS_MAP_ITEM(RX_SPEED, rx_speed, RX),
> > > > > +
> > > > > +       VIRTNET_DEVICE_STATS_MAP_ITEM(TX_BASIC, tx_basic, TX),
> > > > > +       VIRTNET_DEVICE_STATS_MAP_ITEM(TX_CSUM,  tx_csum,  TX),
> > > > > +       VIRTNET_DEVICE_STATS_MAP_ITEM(TX_GSO,   tx_gso,   TX),
> > > > > +       VIRTNET_DEVICE_STATS_MAP_ITEM(TX_SPEED, tx_speed, TX),
> > > > > +};
> > > >
> > > > I think the reason you did this is to ease future extensions, but
> > > > multiple levels of nested macros make the code hard to review. Any
> > > > way to eliminate this?
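
For reference, a single entry of virtio_net_stats_map expands, per the
macro definitions in the patch, to roughly this (shown for the RX_BASIC
case):

	/* VIRTNET_DEVICE_STATS_MAP_ITEM(RX_BASIC, rx_basic, RX) becomes: */
	{
		VIRTIO_NET_STATS_TYPE_RX_BASIC,              /* stat_type   */
		sizeof(struct virtio_net_stats_rx_basic),    /* len         */
		ARRAY_SIZE(virtnet_stats_rx_basic_desc),     /* num         */
		VIRTNET_Q_TYPE_RX,                           /* queue_type  */
		VIRTIO_NET_STATS_TYPE_REPLY_RX_BASIC,        /* reply_type  */
		&virtnet_stats_rx_basic_desc[0]              /* desc        */
	},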
> > >
> > >
> > > NOT only for future extensions.
> > >
> > > When we parse the reply from the device, we need to check the reply stats
> > > one by one, and we need the stats info to help parse them.
> >
> > Yes, but I meant, for example, is there any reason why it can't be done
> > by extending virtnet_stat_desc?
>
>
>
> You know, virtio_net_stats_map is a way to organize the descs.
>
> This is used to avoid the big if-else when parsing the replies from the device.
>
> Without this map, we would have a big if-else like:
>
>  if (reply.type == rx_basic) {
>          /* do the same something */
>  }
>  if (reply.type == tx_basic) {
>          /* do the same something */
>  }
>  if (reply.type == rx_csum) {
>          /* do the same something */
>  }
>  if (reply.type == tx_csum) {
>          /* do the same something */
>  }
>  if (reply.type == rx_gso) {
>          /* do the same something */
>  }
>  if (reply.type == tx_gso) {
>          /* do the same something */
>  }
>  if (reply.type == rx_speed) {
>          /* do the same something */
>  }
>  if (reply.type == tx_speed) {
>          /* do the same something */
>  }
>
> I want to avoid this, so I introduced this map.

Could we have an array of function pointers indexed by the type?
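
Something along these lines, maybe (the virtnet_parse_* helpers and the
table are just a sketch to illustrate the shape, not an existing API):

	/* Illustrative only: one parser per device reply type, indexed by
	 * the reply type byte carried in the response.
	 */
	typedef void (*virtnet_stats_parse_fn)(struct virtnet_info *vi, u32 qid,
					       struct virtnet_stats_ctx *ctx,
					       const u8 *base);

	static const virtnet_stats_parse_fn virtnet_stats_parsers[] = {
		[VIRTIO_NET_STATS_TYPE_REPLY_RX_BASIC] = virtnet_parse_rx_basic,
		[VIRTIO_NET_STATS_TYPE_REPLY_TX_BASIC] = virtnet_parse_tx_basic,
		/* ... one entry per reply type ... */
	};

	static void virtnet_fill_stats(struct virtnet_info *vi, u32 qid,
				       struct virtnet_stats_ctx *ctx,
				       const u8 *base, u8 type)
	{
		if (type < ARRAY_SIZE(virtnet_stats_parsers) &&
		    virtnet_stats_parsers[type])
			virtnet_stats_parsers[type](vi, qid, ctx, base);
	}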

Thanks

>
> YES. I noticed the other comments, but I think we should
> fix this problem first.
>
> Thanks.
>
>
> >
> > >
> > >         static void virtnet_fill_stats(struct virtnet_info *vi, u32 qid,
> > >                                       struct virtnet_stats_ctx *ctx,
> > >                                       const u8 *base, u8 type)
> > >         {
> > >                u32 queue_type, num_rx, num_tx, num_cq;
> > >                struct virtnet_stats_map *m;
> > >                u64 offset, bitmap;
> > >                const __le64 *v;
> > >                int i, j;
> > >
> > >                num_rx = VIRTNET_RQ_STATS_LEN + ctx->desc_num[VIRTNET_Q_TYPE_RX];
> > >                num_tx = VIRTNET_SQ_STATS_LEN + ctx->desc_num[VIRTNET_Q_TYPE_TX];
> > >                num_cq = ctx->desc_num[VIRTNET_Q_TYPE_CQ];
> > >
> > >                queue_type = vq_type(vi, qid);
> > >                bitmap = ctx->bitmap[queue_type];
> > >                offset = 0;
> > >
> > >                if (queue_type == VIRTNET_Q_TYPE_TX) {
> > >                        offset = num_cq + num_rx * vi->curr_queue_pairs + num_tx * (qid / 2);
> > >                        offset += VIRTNET_SQ_STATS_LEN;
> > >                } else if (queue_type == VIRTNET_Q_TYPE_RX) {
> > >                        offset = num_cq + num_rx * (qid / 2) + VIRTNET_RQ_STATS_LEN;
> > >                }
> > >
> > >                for (i = 0; i < ARRAY_SIZE(virtio_net_stats_map); ++i) {
> > >                        m = &virtio_net_stats_map[i];
> > >
> > > ->                     if (m->stat_type & bitmap)
> > >                                offset += m->num;
> > >
> > > ->                     if (type != m->reply_type)
> > >                                continue;
> > >
> > >                        for (j = 0; j < m->num; ++j) {
> > >                                v = (const __le64 *)(base + m->desc[j].offset);
> > >                                ctx->data[offset + j] = le64_to_cpu(*v);
> > >                        }
> > >
> > >                        break;
> > >                }
> > >         }
> > >
> > > Thanks.
> >
> > Btw, just a reminder, there are other comments for this patch.
> >
> > Thanks
> >
>





