Re: [PATCH v5 07/10] kvm: s390: Add CPU dump functionality

On 5/17/22 13:32, Claudio Imbrenda wrote:
On Mon, 16 May 2022 09:08:14 +0000
Janosch Frank <frankja@xxxxxxxxxxxxx> wrote:

The previous patch introduced the per-VM dump functions; now let's
focus on dumping the VCPU state via the newly introduced
KVM_S390_PV_CPU_COMMAND ioctl, which mirrors the VM UV ioctl and can
be extended with new commands later.

Signed-off-by: Janosch Frank <frankja@xxxxxxxxxxxxx>
---
  arch/s390/kvm/kvm-s390.c | 73 ++++++++++++++++++++++++++++++++++++++++
  arch/s390/kvm/kvm-s390.h |  1 +
  arch/s390/kvm/pv.c       | 16 +++++++++
  include/uapi/linux/kvm.h |  4 +++
  4 files changed, 94 insertions(+)

diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 6bf9dd85d50f..c0c848c84552 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -5129,6 +5129,52 @@ long kvm_arch_vcpu_async_ioctl(struct file *filp,
  	return -ENOIOCTLCMD;
  }
+static int kvm_s390_handle_pv_vcpu_dump(struct kvm_vcpu *vcpu,
+					struct kvm_pv_cmd *cmd)
+{
+	struct kvm_s390_pv_dmp dmp;
+	void *data;
+	int ret;
+
+	/* Dump initialization is a prerequisite */
+	if (!vcpu->kvm->arch.pv.dumping)
+		return -EINVAL;
+
+	if (copy_from_user(&dmp, (__u8 __user *)cmd->data, sizeof(dmp)))
+		return -EFAULT;
+
+	/* We only handle this subcmd right now */
+	if (dmp.subcmd != KVM_PV_DUMP_CPU)
+		return -EINVAL;
+
+	/* CPU dump length is the same as create cpu storage donation. */
+	if (dmp.buff_len != uv_info.guest_cpu_stor_len)
+		return -EINVAL;
+
+	data = vzalloc(uv_info.guest_cpu_stor_len);

is it really so big that you need to do a virtual allocation?
(I thought it was at most a few pages)

It's at least a page


can you at least use kvzalloc?

Sure


+	if (!data)
+		return -ENOMEM;
+
+	ret = kvm_s390_pv_dump_cpu(vcpu, data, &cmd->rc, &cmd->rrc);
+
+	VCPU_EVENT(vcpu, 3, "PROTVIRT DUMP CPU %d rc %x rrc %x",
+		   vcpu->vcpu_id, cmd->rc, cmd->rrc);
+
+	if (ret) {
+		vfree(data);
+		return -EINVAL;

if (ret)
	ret = -EINVAL;

+	}
+
+	/* On success copy over the dump data */
+	if (copy_to_user((__u8 __user *)dmp.buff_addr, data, uv_info.guest_cpu_stor_len)) {

if (!ret && ...)
	ret = -EFAULT;

+		vfree(data);
+		return -EFAULT;
+	}
+
+	vfree(data);
+	return 0;

return ret;


this way you have only one free and one return, should be more readable
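I.e. the tail of the function could look roughly like this (untested sketch, with the kvzalloc switch from above folded in):

	data = kvzalloc(uv_info.guest_cpu_stor_len, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	ret = kvm_s390_pv_dump_cpu(vcpu, data, &cmd->rc, &cmd->rrc);

	VCPU_EVENT(vcpu, 3, "PROTVIRT DUMP CPU %d rc %x rrc %x",
		   vcpu->vcpu_id, cmd->rc, cmd->rrc);

	/* Map a UV failure to -EINVAL, keep 0 on success */
	if (ret)
		ret = -EINVAL;

	/* On success copy over the dump data */
	if (!ret &&
	    copy_to_user((__u8 __user *)dmp.buff_addr, data,
			 uv_info.guest_cpu_stor_len))
		ret = -EFAULT;

	kvfree(data);
	return ret;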

+}
+
  long kvm_arch_vcpu_ioctl(struct file *filp,
  			 unsigned int ioctl, unsigned long arg)
  {
@@ -5293,6 +5339,33 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
  					   irq_state.len);
  		break;
  	}
+	case KVM_S390_PV_CPU_COMMAND: {
+		struct kvm_pv_cmd cmd;
+
+		r = -EINVAL;
+		if (!is_prot_virt_host())
+			break;
+
+		r = -EFAULT;
+		if (copy_from_user(&cmd, argp, sizeof(cmd)))
+			break;
+
+		r = -EINVAL;
+		if (cmd.flags)
+			break;
+
+		/* We only handle this cmd right now */
+		if (cmd.cmd != KVM_PV_DUMP)
+			break;
+
+		r = kvm_s390_handle_pv_vcpu_dump(vcpu, &cmd);
+
+		/* Always copy over UV rc / rrc data */
+		if (copy_to_user((__u8 __user *)argp, &cmd.rc,
+				 sizeof(cmd.rc) + sizeof(cmd.rrc)))
+			r = -EFAULT;
+		break;
+	}
  	default:
  		r = -ENOTTY;
  	}
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 2868dd0bba25..a39815184350 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -250,6 +250,7 @@ int kvm_s390_pv_set_sec_parms(struct kvm *kvm, void *hdr, u64 length, u16 *rc,
  int kvm_s390_pv_unpack(struct kvm *kvm, unsigned long addr, unsigned long size,
  		       unsigned long tweak, u16 *rc, u16 *rrc);
  int kvm_s390_pv_set_cpu_state(struct kvm_vcpu *vcpu, u8 state);
+int kvm_s390_pv_dump_cpu(struct kvm_vcpu *vcpu, void *buff, u16 *rc, u16 *rrc);
  int kvm_s390_pv_dump_stor_state(struct kvm *kvm, void __user *buff_user,
  				u64 *gaddr, u64 buff_user_len, u16 *rc, u16 *rrc);
diff --git a/arch/s390/kvm/pv.c b/arch/s390/kvm/pv.c
index fd261667d2c2..8c72330efc0e 100644
--- a/arch/s390/kvm/pv.c
+++ b/arch/s390/kvm/pv.c
@@ -300,6 +300,22 @@ int kvm_s390_pv_set_cpu_state(struct kvm_vcpu *vcpu, u8 state)
  	return 0;
  }
+int kvm_s390_pv_dump_cpu(struct kvm_vcpu *vcpu, void *buff, u16 *rc, u16 *rrc)
+{
+	struct uv_cb_dump_cpu uvcb = {
+		.header.cmd = UVC_CMD_DUMP_CPU,
+		.header.len = sizeof(uvcb),
+		.cpu_handle = vcpu->arch.pv.handle,
+		.dump_area_origin = (u64)buff,
+	};
+	int cc;
+
+	cc = uv_call_sched(0, (u64)&uvcb);
+	*rc = uvcb.header.rc;
+	*rrc = uvcb.header.rrc;
+	return cc;
+}
+
  /* Size of the cache for the storage state dump data. 1MB for now */
  #define DUMP_BUFF_LEN HPAGE_SIZE
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 1c60c2d314ba..0a8b57654ea7 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -1657,6 +1657,7 @@ enum pv_cmd_dmp_id {
  	KVM_PV_DUMP_INIT,
  	KVM_PV_DUMP_CONFIG_STOR_STATE,
  	KVM_PV_DUMP_COMPLETE,
+	KVM_PV_DUMP_CPU,
  };
struct kvm_s390_pv_dmp {
@@ -2118,4 +2119,7 @@ struct kvm_stats_desc {
  /* Available with KVM_CAP_XSAVE2 */
  #define KVM_GET_XSAVE2		  _IOR(KVMIO,  0xcf, struct kvm_xsave)
+/* Available with KVM_CAP_S390_PROTECTED_DUMP */
+#define KVM_S390_PV_CPU_COMMAND	_IOWR(KVMIO, 0xd0, struct kvm_pv_cmd)
+
  #endif /* __LINUX_KVM_H */
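For reference, a minimal userspace sketch of how the new ioctl could be driven (hypothetical helper, not part of the patch; vcpu_fd is the usual KVM vcpu fd and dump_len is the CPU dump area size reported by the dump info query added earlier in this series):

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch only: buf must be dump_len bytes */
static int pv_dump_vcpu(int vcpu_fd, void *buf, __u64 dump_len)
{
	struct kvm_s390_pv_dmp dmp = {
		.subcmd    = KVM_PV_DUMP_CPU,
		.buff_addr = (__u64)(unsigned long)buf,
		.buff_len  = dump_len,
	};
	struct kvm_pv_cmd cmd = {
		.cmd  = KVM_PV_DUMP,
		.data = (__u64)(unsigned long)&dmp,
		/* flags must be 0 */
	};
	int ret = ioctl(vcpu_fd, KVM_S390_PV_CPU_COMMAND, &cmd);

	if (ret)
		fprintf(stderr, "KVM_PV_DUMP_CPU failed: rc %x rrc %x\n",
			cmd.rc, cmd.rrc);
	return ret;
}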
