RE: [PATCH v1 1/2] vdpa: Add support for querying vendor statistics

> From: Eli Cohen <elic@xxxxxxxxxx>
> Sent: Wednesday, November 24, 2021 10:26 PM
> 
> Add support for querying virtqueue statistics. Supported statistics are:
> 
> received_desc - number of descriptors received for the virtqueue
> completed_desc - number of descriptors completed for the virtqueue
> 
> A descriptor using indirect buffers is still counted as 1. In addition, N chained
> descriptors are correctly counted as N, as one would expect.
> 
> A new callback was added to vdpa_config_ops which provides the means for
> the vdpa driver to return statistics results.
> 
> The interface allows for reading all the supported virtqueues, including the
> control virtqueue if it exists, by returning the next queue index to query.
> 
> Examples:
> 1. Read statistics for the virtqueue at index 0
> $ vdpa dev vstats show vdpa-a qidx 0
> vdpa-a:
> qidx 0 rx received_desc 256 completed_desc 9
> 
> 2. Read statistics for all the virtqueues
> $ vdpa dev vstats show vdpa-a
> vdpa-a:
> qidx 0 rx received_desc 256 completed_desc 9
> qidx 1 tx received_desc 21 completed_desc 21
> qidx 2 ctrl received_desc 0 completed_desc 0
> 
> Signed-off-by: Eli Cohen <elic@xxxxxxxxxx>
> ---
> v0 -> v1:
> Emphasize that we're dealing with vendor specific counters.
> Terminate query loop on error
> 
>  drivers/vdpa/vdpa.c       | 144 ++++++++++++++++++++++++++++++++++++++
>  include/linux/vdpa.h      |  10 +++
>  include/uapi/linux/vdpa.h |   9 +++
>  3 files changed, 163 insertions(+)
> 
> diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
> index 7332a74a4b00..da658c80ff2a 100644
> --- a/drivers/vdpa/vdpa.c
> +++ b/drivers/vdpa/vdpa.c
> @@ -781,6 +781,90 @@ vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid,
>  	return err;
>  }
> 
> +static int vdpa_fill_stats_rec(struct vdpa_device *vdev, struct sk_buff *msg,
> +			       u16 *index)
> +{
> +	struct vdpa_vq_stats stats;
Better to name it vdpa_vq_vstats, as this reflects vendor stats.
> +
> +static int vdpa_dev_net_stats_fill(struct vdpa_device *vdev, struct sk_buff *msg,
> +				    u16 index)
> +{
vdpa_dev_net_vstats_fill

> +	int err;
> +
> +	if (!vdev->config->get_vq_stats)
> +		return -EOPNOTSUPP;
> +
> +	if (index != VDPA_INVAL_QUEUE_INDEX) {
> +		err = vdpa_fill_stats_rec(vdev, msg, &index);
> +		if (err)
> +			return err;
> +	} else {
> +		index = 0;
> +		do {
> +			err = vdpa_fill_stats_rec(vdev, msg, &index);
> +			if (err)
> +				return err;
> +		} while (index != VDPA_INVAL_QUEUE_INDEX);
> +	}
> +
> +	return 0;
> +}
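To make the contract from the commit message concrete: the vendor driver fills the
counters for the queue it is asked about and reports back which queue index should be
queried next, returning the sentinel once the last queue (control VQ included) has been
read. A rough driver-side sketch follows; the callback prototype, the vdpa_vq_stats
field names and the my_vdpa_* helpers are my assumptions here, since the
include/linux/vdpa.h hunk is not quoted in this reply:

/* Hypothetical vendor implementation of the new get_vq_stats callback.
 * Assumed prototype:
 *   int (*get_vq_stats)(struct vdpa_device *vdev, u16 *idx,
 *                       struct vdpa_vq_stats *stats);
 */
static int my_vdpa_get_vq_stats(struct vdpa_device *vdev, u16 *idx,
				struct vdpa_vq_stats *stats)
{
	struct my_vdpa_dev *ndev = to_my_vdpa(vdev);	/* hypothetical helpers */
	struct my_vq *vq;

	if (*idx >= ndev->num_vqs)
		return -EINVAL;

	vq = &ndev->vqs[*idx];
	stats->received_desc = vq->received_desc;	/* descriptors received */
	stats->completed_desc = vq->completed_desc;	/* descriptors completed */

	/* Report the next queue to query; the sentinel terminates the
	 * do/while loop in vdpa_dev_net_stats_fill() above.
	 */
	if (*idx + 1 < ndev->num_vqs)
		*idx += 1;
	else
		*idx = VDPA_INVAL_QUEUE_INDEX;

	return 0;
}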
> +
> +static int vdpa_dev_stats_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid,
> +			       u32 seq, int flags, u16 index)
> +{
> +	u32 device_id;
> +	void *hdr;
> +	int err;
> +
> +	hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags,
> +			  VDPA_CMD_DEV_VSTATS_GET);
> +	if (!hdr)
> +		return -EMSGSIZE;
> +
> +	if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev))) {
> +		err = -EMSGSIZE;
> +		goto undo_msg;
> +	}
> +
> +	device_id = vdev->config->get_device_id(vdev);
> +	if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id)) {
> +		err = -EMSGSIZE;
> +		goto undo_msg;
> +	}
> +
> +	err = vdpa_dev_net_stats_fill(vdev, msg, index);
> +
> +	genlmsg_end(msg, hdr);
> +
> +	return err;
> +
> +undo_msg:
> +	genlmsg_cancel(msg, hdr);
> +	return err;
> +}
> +
>  static int vdpa_nl_cmd_dev_config_get_doit(struct sk_buff *skb, struct genl_info *info)
>  {
>  	struct vdpa_device *vdev;
> @@ -862,6 +946,59 @@ vdpa_nl_cmd_dev_config_get_dumpit(struct sk_buff *msg, struct netlink_callback *
>  	return msg->len;
>  }
> 
> +static int vdpa_nl_cmd_dev_stats_get_doit(struct sk_buff *skb,
> +					  struct genl_info *info)
> +{
> +	struct vdpa_device *vdev;
> +	struct sk_buff *msg;
> +	const char *devname;
> +	struct device *dev;
> +	u16 index = -1;
> +	int err;
> +
> +	if (!info->attrs[VDPA_ATTR_DEV_NAME])
> +		return -EINVAL;
> +
> +	if (info->attrs[VDPA_ATTR_DEV_QUEUE_INDEX])
> +		index = nla_get_u16(info->attrs[VDPA_ATTR_DEV_QUEUE_INDEX]);
> +
> +	devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
> +	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
> +	if (!msg)
> +		return -ENOMEM;
> +
> +	mutex_lock(&vdpa_dev_mutex);
> +	dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
> +	if (!dev) {
> +		NL_SET_ERR_MSG_MOD(info->extack, "device not found");
> +		err = -ENODEV;
> +		goto dev_err;
> +	}
> +	vdev = container_of(dev, struct vdpa_device, dev);
> +	if (!vdev->mdev) {
> +		NL_SET_ERR_MSG_MOD(info->extack, "unmanaged vdpa device");
> +		err = -EINVAL;
> +		goto mdev_err;
> +	}
> +	err = vdpa_dev_stats_fill(vdev, msg, info->snd_portid, info->snd_seq, 0, index);
We should really be dumping all of the device's queue stats in the dumpit() routine,
but there are limitations in the netlink core that prevent that.
Given that, I prefer we limit the current vstats dump to a single queue whose index is specified by the user.

In the future, dumpit and the netlink framework can be enhanced to dump stats for all queues of the specified device.

> 
> +struct vdpa_vq_stats {
vdpa_vq_vstats here as well.

> 
> +#define VDPA_INVAL_QUEUE_INDEX 0xffff
Let's avoid this and have user space always specify the qindex for now.
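If we require the index from user space, the sentinel and the "dump all queues" loop go
away and the doit handler just validates the attribute. A minimal sketch of what I have
in mind, reusing the names from this patch (not a final implementation):

static int vdpa_dev_net_vstats_fill(struct vdpa_device *vdev,
				    struct sk_buff *msg, u16 index)
{
	/* Query exactly one queue; no VDPA_INVAL_QUEUE_INDEX iteration. */
	if (!vdev->config->get_vq_stats)
		return -EOPNOTSUPP;

	return vdpa_fill_stats_rec(vdev, msg, &index);
}

and in vdpa_nl_cmd_dev_stats_get_doit():

	/* The queue index becomes mandatory, alongside the device name. */
	if (!info->attrs[VDPA_ATTR_DEV_NAME] ||
	    !info->attrs[VDPA_ATTR_DEV_QUEUE_INDEX])
		return -EINVAL;

	index = nla_get_u16(info->attrs[VDPA_ATTR_DEV_QUEUE_INDEX]);

User space would then always pass the queue index, i.e. only the per-queue form from the
commit message is supported:

$ vdpa dev vstats show vdpa-a qidx 0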
