This patch includes the SVE register IDs in the list returned by
KVM_GET_REG_LIST, as appropriate.

On a non-SVE-enabled vcpu, no new IDs are added.

On an SVE-enabled vcpu, IDs for the FPSIMD V-registers are removed
from the list, since userspace is required to access the Z-registers
instead in order to access the V-register content.

For the variably-sized SVE registers, the appropriate set of slice
IDs is enumerated, depending on the maximum vector length for the
vcpu.

As it currently stands, the SVE architecture never requires more than
one slice to exist per register, so this patch adds no explicit
support for enumerating multiple slices.  The code can be extended
straightforwardly to support this in the future, if needed.

Signed-off-by: Dave Martin <Dave.Martin@xxxxxxx>

---

Changes since v5:

(Dropped Julien Thierry's Reviewed-by due to non-trivial rebasing)

 * Move the mis-split reword (to prevent put_user()s being accidentally
   the correct size) here from "KVM: arm64/sve: Add pseudo-register for
   the guest's vector lengths".
---
 arch/arm64/kvm/guest.c | 56 ++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 56 insertions(+)

diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 736d8cb..585c31e5 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -411,6 +411,56 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 	return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
 }
 
+static unsigned long num_sve_regs(const struct kvm_vcpu *vcpu)
+{
+	/* Only the first slice ever exists, for now */
+	const unsigned int slices = 1;
+
+	if (!vcpu_has_sve(vcpu))
+		return 0;
+
+	return slices * (SVE_NUM_PREGS + SVE_NUM_ZREGS + 1 /* FFR */);
+}
+
+static int copy_sve_reg_indices(const struct kvm_vcpu *vcpu,
+				u64 __user *uindices)
+{
+	/* Only the first slice ever exists, for now */
+	const unsigned int slices = 1;
+	u64 reg;
+	unsigned int i, n;
+	int num_regs = 0;
+
+	if (!vcpu_has_sve(vcpu))
+		return 0;
+
+	for (i = 0; i < slices; i++) {
+		for (n = 0; n < SVE_NUM_ZREGS; n++) {
+			reg = KVM_REG_ARM64_SVE_ZREG(n, i);
+			if (put_user(reg, uindices++))
+				return -EFAULT;
+
+			num_regs++;
+		}
+
+		for (n = 0; n < SVE_NUM_PREGS; n++) {
+			reg = KVM_REG_ARM64_SVE_PREG(n, i);
+			if (put_user(reg, uindices++))
+				return -EFAULT;
+
+			num_regs++;
+		}
+
+		reg = KVM_REG_ARM64_SVE_FFR(i);
+		if (put_user(reg, uindices++))
+			return -EFAULT;
+
+		num_regs++;
+	}
+
+	return num_regs;
+}
+
 /**
  * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
  *
@@ -421,6 +471,7 @@ unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
 	unsigned long res = 0;
 
 	res += num_core_regs(vcpu);
+	res += num_sve_regs(vcpu);
 	res += kvm_arm_num_sys_reg_descs(vcpu);
 	res += kvm_arm_get_fw_num_regs(vcpu);
 	res += NUM_TIMER_REGS;
@@ -442,6 +493,11 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
 		return ret;
 	uindices += ret;
 
+	ret = copy_sve_reg_indices(vcpu, uindices);
+	if (ret < 0)
+		return ret;
+	uindices += ret;
+
 	ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
 	if (ret)
 		return ret;
-- 
2.1.4

_______________________________________________
kvmarm mailing list
kvmarm@xxxxxxxxxxxxxxxxxxxxx
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm
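
For illustration only, and not part of the patch: a rough userspace sketch
of how a VMM might consume the register list that this patch extends, using
the usual two-call KVM_GET_REG_LIST pattern.  It assumes "vcpu_fd" refers to
an already-initialised vcpu (with KVM_ARM_VCPU_SVE set and the vcpu
finalized, if SVE is wanted), and that the SVE UAPI constants added
elsewhere in this series (KVM_REG_ARM64_SVE and friends) are visible via
<linux/kvm.h>.

	/*
	 * Count how many register IDs in the vcpu's KVM_GET_REG_LIST
	 * output live in the SVE coprocessor space.
	 */
	#include <errno.h>
	#include <stdlib.h>
	#include <sys/ioctl.h>

	#include <linux/kvm.h>

	static int count_sve_reg_ids(int vcpu_fd)
	{
		struct kvm_reg_list probe = { .n = 0 };
		struct kvm_reg_list *list;
		int sve_ids = 0;
		__u64 i;

		/* First call is expected to fail with E2BIG, reporting the count in .n */
		if (ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe) == 0 || errno != E2BIG)
			return -1;

		list = calloc(1, sizeof(*list) + probe.n * sizeof(list->reg[0]));
		if (!list)
			return -1;

		/* Second call fills reg[] with the IDs enumerated by the kernel */
		list->n = probe.n;
		if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list) < 0) {
			free(list);
			return -1;
		}

		/* IDs in the SVE coproc space: the Z-, P- and FFR slice IDs */
		for (i = 0; i < list->n; i++)
			if ((list->reg[i] & KVM_REG_ARM_COPROC_MASK) ==
			    KVM_REG_ARM64_SVE)
				sve_ids++;

		free(list);
		return sve_ids;
	}

On an SVE-enabled vcpu this would count the per-slice IDs enumerated by
copy_sve_reg_indices() above (32 Z-registers, 16 predicate registers and
FFR per slice), plus any other IDs the series places in the SVE coprocessor
space; on a non-SVE vcpu it would count none.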