pio_copy_data() and load|save_guest_segment_descriptor() return X86EMUL_* values. Mixing up these values with 0, 1, ... may produce unpredictable bugs. Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@xxxxxxxxxxxxx> --- arch/x86/kvm/x86.c | 27 +++++++++++++++------------ 1 files changed, 15 insertions(+), 12 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 78b8ddb..67f8231 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3454,7 +3454,6 @@ int complete_pio(struct kvm_vcpu *vcpu) { struct kvm_pio_request *io = &vcpu->arch.pio; long delta; - int r; unsigned long val; if (!io->string) { @@ -3465,9 +3464,9 @@ int complete_pio(struct kvm_vcpu *vcpu) } } else { if (io->in) { - r = pio_copy_data(vcpu); - if (r) - return r; + int ret = pio_copy_data(vcpu); + if (ret != X86EMUL_CONTINUE) + return 1; } delta = 1; @@ -3567,7 +3566,6 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, int in, gva_t address, int rep, unsigned port) { unsigned now, in_page; - int ret = 0; vcpu->run->exit_reason = KVM_EXIT_IO; vcpu->run->io.direction = in ? 
KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT; @@ -3613,20 +3611,22 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, int in, if (!vcpu->arch.pio.in) { /* string PIO write */ - ret = pio_copy_data(vcpu); + int ret = pio_copy_data(vcpu); if (ret == X86EMUL_PROPAGATE_FAULT) { kvm_inject_gp(vcpu, 0); return 1; } - if (ret == 0 && !pio_string_write(vcpu)) { + if (ret == X86EMUL_UNHANDLEABLE) + return 1; + if (ret == X86EMUL_CONTINUE && !pio_string_write(vcpu)) { complete_pio(vcpu); if (vcpu->arch.pio.count == 0) - ret = 1; + return 1; } } /* no string PIO read support yet */ - return ret; + return 0; } EXPORT_SYMBOL_GPL(kvm_emulate_pio_string); @@ -4743,7 +4743,8 @@ int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, if (is_vm86_segment(vcpu, seg) || !is_protmode(vcpu)) return kvm_load_realmode_segment(vcpu, selector, seg); - if (load_guest_segment_descriptor(vcpu, selector, &seg_desc)) + if (load_guest_segment_descriptor(vcpu, selector, &seg_desc) + != X86EMUL_CONTINUE) return 1; seg_desct_to_kvm_desct(&seg_desc, selector, &kvm_seg); @@ -4971,10 +4972,12 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason) /* FIXME: Handle errors. Failure to read either TSS or their * descriptors should generate a pagefault. */ - if (load_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc)) + if (load_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc) + != X86EMUL_CONTINUE) goto out; - if (load_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc)) + if (load_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc) + != X86EMUL_CONTINUE) goto out; if (reason != TASK_SWITCH_IRET) { -- 1.6.3.3 -- To unsubscribe from this list: send the line "unsubscribe kvm" in the body of a message to majordomo@xxxxxxxxxxxxxxx More majordomo info at http://vger.kernel.org/majordomo-info.html