On 3/2/22 13:04, Claudio Imbrenda wrote:
On Wed, 23 Feb 2022 09:20:01 +0000
Janosch Frank <frankja@xxxxxxxxxxxxx> wrote:
Some of the query information is already available via sysfs, but
having an IOCTL makes the information easier to retrieve.
Signed-off-by: Janosch Frank <frankja@xxxxxxxxxxxxx>
---
arch/s390/kvm/kvm-s390.c | 47 ++++++++++++++++++++++++++++++++++++++++
include/uapi/linux/kvm.h | 23 ++++++++++++++++++++
2 files changed, 70 insertions(+)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index faa85397b6fb..837f898ad2ff 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -2217,6 +2217,34 @@ static int kvm_s390_cpus_to_pv(struct kvm *kvm, u16 *rc, u16 *rrc)
return r;
}
+static int kvm_s390_handle_pv_info(struct kvm_s390_pv_info *info)
+{
+ u32 len;
+
+ switch (info->header.id) {
+ case KVM_PV_INFO_VM: {
+ len = sizeof(info->header) + sizeof(info->vm);
+
+ if (info->header.len < len)
+ return -EINVAL;
So if userspace gives a smaller buffer, we fail?
Doesn't this mean that if the struct grows in the future, existing
software will break?
I've already answered this.
If we extend the struct, we can keep this check as the lower bound and
copy back at most min(info->header.len, sizeof(extended struct)) bytes.
I.e. it would work like the QUI 0x100 rc.
Or we can add a new IOCTL which returns the KVM_PV_INFO_VM data plus
the new values.

The more interesting question is how we indicate more data and new IOCTL
commands. Do we always bind them to a capability? Should we add a query
in front of the KVM_PV_INFO_VM call that tells userspace which info
calls are available?
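To make that concrete, here is a rough sketch of how the copy-back path
could look under that scheme (illustration only, not part of this patch;
min_t comes from linux/minmax.h):

	/*
	 * Sketch: copy back at most what userspace asked for, but
	 * report the full size in header.len so callers can detect
	 * truncation, similar to the QUI 0x100 rc.
	 */
	u32 copy_len = min_t(u32, info.header.len, sizeof(info));

	info.header.len = sizeof(info);
	if (copy_to_user(argp, &info, copy_len))
		return -EFAULT;
	return 0;

Old binaries keep working because the lower-bound check never tightens,
and new binaries can compare the returned header.len against the size
they passed in.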
+
+ memcpy(info->vm.inst_calls_list,
+ uv_info.inst_calls_list,
+ sizeof(uv_info.inst_calls_list));
+
+ /* It's the maximum cpu id, not the number of cpus, so it's off by one */
+ info->vm.max_cpus = uv_info.max_guest_cpu_id + 1;
+ info->vm.max_guests = uv_info.max_num_sec_conf;
+ info->vm.max_guest_addr = uv_info.max_sec_stor_addr;
+ info->vm.feature_indication = uv_info.uv_feature_indications;
+
+ return 0;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
{
int r = 0;
@@ -2353,6 +2381,25 @@ static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
cmd->rc, cmd->rrc);
break;
}
+ case KVM_PV_INFO: {
+ struct kvm_s390_pv_info info = {};
+
+ if (copy_from_user(&info, argp, sizeof(info.header)))
+ return -EFAULT;
+
+ if (info.header.len < sizeof(info.header))
+ return -EINVAL;
+
+ r = kvm_s390_handle_pv_info(&info);
+ if (r)
+ return r;
+
+ if (copy_to_user(argp, &info, sizeof(info)))
+ return -EFAULT;
+ return 0;
+ }
default:
r = -ENOTTY;
}
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index dbc550bbd9fa..96fceb204a92 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -1642,6 +1642,28 @@ struct kvm_s390_pv_unp {
__u64 tweak;
};
+enum pv_cmd_info_id {
+ KVM_PV_INFO_VM,
+};
+
+struct kvm_s390_pv_info_vm {
+ __u64 inst_calls_list[4];
+ __u64 max_cpus;
+ __u64 max_guests;
+ __u64 max_guest_addr;
+ __u64 feature_indication;
+};
+
+struct kvm_s390_pv_info_header {
+ __u32 id;
+ __u32 len;
+};
+
+struct kvm_s390_pv_info {
+ struct kvm_s390_pv_info_header header;
+ struct kvm_s390_pv_info_vm vm;
+};
+
enum pv_cmd_id {
KVM_PV_ENABLE,
KVM_PV_DISABLE,
@@ -1650,6 +1672,7 @@ enum pv_cmd_id {
KVM_PV_VERIFY,
KVM_PV_PREP_RESET,
KVM_PV_UNSHARE_ALL,
+ KVM_PV_INFO,
};
struct kvm_pv_cmd {