Hi Sean,

On 12/3/2024 4:50 PM, Sean Christopherson wrote:
> On Tue, Dec 03, 2024, Melody (Huibo) Wang wrote:
>> Hi Sean,
>>
>> On 12/2/2024 4:11 PM, Sean Christopherson wrote:
>>
>>>
>>> E.g. something like this?  Definitely feel free to suggest better names.
>>>
>>> static inline void svm_vmgexit_set_return_code(struct vcpu_svm *svm,
>>>                                                u64 response, u64 data)
>>> {
>>>         ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, response);
>>>         ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, data);
>>> }
>>>
>> If I make this function more generic where the exit info is set for both KVM
>> and the guest, then maybe I can write something like this:
>
> I like the idea, but I actually think it's better to keep the guest and host code
> separate in this case, because the guest code should actually set a triple, e.g.
>
> static __always_inline void sev_es_vmgexit_set_exit_info(struct ghcb *ghcb,
>                                                          u64 exit_code,
>                                                          u64 exit_info_1,
>                                                          u64 exit_info_2)
> {
>         ghcb_set_sw_exit_code(ghcb, exit_code);
>         ghcb_set_sw_exit_info_1(ghcb, exit_info_1);
>         ghcb_set_sw_exit_info_2(ghcb, exit_info_2);
> }
>
> I'm not totally opposed to sharing code, but I think it will be counter-productive
> in this specific case.  E.g. the guest version needs to be __always_inline so that
> it can be used in noinstr code.
>
>> void ghcb_set_exit_info(struct ghcb *ghcb,
>>                         u64 info1, u64 info2)
>> {
>>         ghcb_set_sw_exit_info_1(ghcb, info1);
>>         ghcb_set_sw_exit_info_2(ghcb, info2);
>>
>> }
>> This way we can address every possible case that sets the exit info - not only KVM.
>>
>> And I am not sure about the wrappers for each specific case because we will
>> have too many, too specific small functions, but if you want them I can add
>> them.
>
> I count three.  We have far, far more wrappers like VMX's is_exception_n(), and IMO
> those wrappers make the code significantly more readable.

Below is an untested conversion of the easy cases. I will take a look at
converting the more complicated ones, where the two exit info fields are not
set together (a rough sketch of one such case is included after the diff), and
see whether that looks more readable.

Thanks,
Melody

diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index e7db7a5703b7..0ed6bbdf949e 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -3433,8 +3433,7 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
 		dump_ghcb(svm);
 	}
 
-	ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, GHCB_HV_RESP_MALFORMED_INPUT);
-	ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, reason);
+	svm_vmgexit_bad_input(svm, reason);
 
 	/* Resume the guest to "return" the error code. */
 	return 1;
@@ -3577,8 +3576,7 @@ static int setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
 	return 0;
 
 e_scratch:
-	ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, GHCB_HV_RESP_MALFORMED_INPUT);
-	ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, GHCB_ERR_INVALID_SCRATCH_AREA);
+	svm_vmgexit_bad_input(svm, GHCB_ERR_INVALID_SCRATCH_AREA);
 
 	return 1;
 }
@@ -4124,8 +4122,7 @@ static int snp_handle_ext_guest_req(struct vcpu_svm *svm, gpa_t req_gpa, gpa_t r
 	return snp_handle_guest_req(svm, req_gpa, resp_gpa);
 
 request_invalid:
-	ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, GHCB_HV_RESP_MALFORMED_INPUT);
-	ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, GHCB_ERR_INVALID_INPUT);
+	svm_vmgexit_bad_input(svm, GHCB_ERR_INVALID_INPUT);
 	return 1; /* resume guest */
 }
@@ -4317,8 +4314,7 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
 	if (ret)
 		return ret;
 
-	ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, GHCB_HV_RESP_SUCCESS);
-	ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 0);
+	svm_vmgexit_success(svm, 0);
 
 	exit_code = kvm_ghcb_get_sw_exit_code(control);
 	switch (exit_code) {
@@ -4367,8 +4363,7 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
 		default:
 			pr_err("svm: vmgexit: unsupported AP jump table request - exit_info_1=%#llx\n",
 			       control->exit_info_1);
-			ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, GHCB_HV_RESP_MALFORMED_INPUT);
-			ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, GHCB_ERR_INVALID_INPUT);
+			svm_vmgexit_bad_input(svm, GHCB_ERR_INVALID_INPUT);
 		}
 
 		ret = 1;
@@ -4397,8 +4392,7 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
 	case SVM_VMGEXIT_AP_CREATION:
 		ret = sev_snp_ap_creation(svm);
 		if (ret) {
-			ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, GHCB_HV_RESP_MALFORMED_INPUT);
-			ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, GHCB_ERR_INVALID_INPUT);
+			svm_vmgexit_bad_input(svm, GHCB_ERR_INVALID_INPUT);
 		}
 
 		ret = 1;
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 58bce5f1ab0c..104e13d59c8a 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -2977,11 +2977,7 @@ static int svm_complete_emulated_msr(struct kvm_vcpu *vcpu, int err)
 	if (!err || !sev_es_guest(vcpu->kvm) || WARN_ON_ONCE(!svm->sev_es.ghcb))
 		return kvm_complete_insn_gp(vcpu, err);
 
-	ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, GHCB_HV_RESP_ISSUE_EXCEPTION);
-	ghcb_set_sw_exit_info_2(svm->sev_es.ghcb,
-				X86_TRAP_GP |
-				SVM_EVTINJ_TYPE_EXEPT |
-				SVM_EVTINJ_VALID);
+	svm_vmgexit_inject_exception(svm, X86_TRAP_GP);
 
 	return 1;
 }
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 43fa6a16eb19..baff8237c5c9 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -588,6 +588,30 @@ static inline bool is_vnmi_enabled(struct vcpu_svm *svm)
 		return false;
 }
 
+static inline void svm_vmgexit_set_return_code(struct vcpu_svm *svm,
+					       u64 response, u64 data)
+{
+	ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, response);
+	ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, data);
+}
+
+static inline void svm_vmgexit_inject_exception(struct vcpu_svm *svm, u8 vector)
+{
+	u64 data = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT | vector;
+
+	svm_vmgexit_set_return_code(svm, GHCB_HV_RESP_ISSUE_EXCEPTION, data);
+}
+
+static inline void svm_vmgexit_bad_input(struct vcpu_svm *svm, u64 suberror)
+{
+	svm_vmgexit_set_return_code(svm, GHCB_HV_RESP_MALFORMED_INPUT, suberror);
+}
+
+static inline void svm_vmgexit_success(struct vcpu_svm *svm, u64 data)
+{
+	svm_vmgexit_set_return_code(svm, GHCB_HV_RESP_SUCCESS, data);
+}
+
 /* svm.c */
 #define MSR_INVALID				0xffffffffU
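
And for reference, a rough, untested sketch of one of the "more complicated" cases
mentioned above: the AP jump table "get" request in sev_handle_vmgexit() only updates
sw_exit_info_2 and relies on the GHCB_HV_RESP_SUCCESS value written earlier in the
function, so (assuming I'm remembering the existing AP jump table code correctly) it
could collapse into the new helper like so:

 		case 1:
 			/* Get AP jump table address */
-			ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, sev->ap_jump_table);
+			svm_vmgexit_success(svm, sev->ap_jump_table);
 			break;

Whether that reads better than keeping the open-coded ghcb_set_sw_exit_info_2() call
is what I want to evaluate for each of those spots before sending the next version.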