Re: [PATCH 6/6 v5] KVM: PPC: Add userspace debug stub support

On 24.06.2013, at 13:22, Bhushan Bharat-R65777 wrote:

> 
> 
>> -----Original Message-----
>> From: Alexander Graf [mailto:agraf@xxxxxxx]
>> Sent: Monday, June 24, 2013 4:13 PM
>> To: Bhushan Bharat-R65777
>> Cc: kvm-ppc@xxxxxxxxxxxxxxx; kvm@xxxxxxxxxxxxxxx; Wood Scott-B07421;
>> tiejun.chen@xxxxxxxxxxxxx; Bhushan Bharat-R65777
>> Subject: Re: [PATCH 6/6 v5] KVM: PPC: Add userspace debug stub support
>> 
>> 
>> On 24.06.2013, at 11:08, Bharat Bhushan wrote:
>> 
>>> This patch adds debug stub support on booke/bookehv.
>>> The QEMU debug stub can now use hardware breakpoints, watchpoints and
>>> software breakpoints to debug the guest.
>>> 
>>> This is how the debug register context is saved/restored when switching
>>> between the guest, QEMU userspace and other user processes:
>>> 
>>> When QEMU is running
>>> -> thread->debug_reg holds QEMU's debug register context.
>>> -> The kernel handles switching the debug registers on context switch.
>>> -> No vcpu_load() is called.
>>> 
>>> QEMU makes an ioctl (except RUN)
>>> -> This calls vcpu_load(), which should not change the debug context.
>>> -> Some ioctls can change the vcpu debug registers; that context is
>>> -> saved in vcpu->debug_regs.
>>> 
>>> QEMU makes the RUN ioctl
>>> -> Save thread->debug_reg on the stack.
>>> -> Copy vcpu->debug_reg into thread->debug_reg and load it.
>>> -> Run the VCPU (so the thread now points to the vcpu debug context).
>>> 
>>> Context switch while the VCPU is running
>>> -> vcpu_load() should not load any debug context; the kernel loads the
>>> -> vcpu context because thread->debug_regs already points to it.
>>> 
>>> On heavyweight_exit
>>> -> Restore thread->debug_reg from the context saved on the stack.
>>> 
>>> We do not yet support debug resource emulation for the guest; on a
>>> debug exception we always exit to user space, whether or not user
>>> space expects that exception. If the exception is unexpected (a
>>> breakpoint/watchpoint event not set by user space), the action is
>>> left to user space. This matches the previous behaviour, except that
>>> user space now gets proper exit state.
>>> 
>>> Signed-off-by: Bharat Bhushan <bharat.bhushan@xxxxxxxxxxxxx>
>>> ---
>>> arch/powerpc/include/asm/kvm_host.h |    3 +
>>> arch/powerpc/include/uapi/asm/kvm.h |    1 +
>>> arch/powerpc/kvm/booke.c            |  233 ++++++++++++++++++++++++++++++++---
>>> arch/powerpc/kvm/booke.h            |    5 +
>>> 4 files changed, 224 insertions(+), 18 deletions(-)
>>> 
>>> diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
>>> index 838a577..aeb490d 100644
>>> --- a/arch/powerpc/include/asm/kvm_host.h
>>> +++ b/arch/powerpc/include/asm/kvm_host.h
>>> @@ -524,7 +524,10 @@ struct kvm_vcpu_arch {
>>> 	u32 eptcfg;
>>> 	u32 epr;
>>> 	u32 crit_save;
>>> +	/* guest debug registers */
>>> 	struct debug_reg dbg_reg;
>>> +	/* hardware visible debug registers when in guest state */
>>> +	struct debug_reg shadow_dbg_reg;
>>> #endif
>>> 	gpa_t paddr_accessed;
>>> 	gva_t vaddr_accessed;
>>> diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
>>> index ded0607..f5077c2 100644
>>> --- a/arch/powerpc/include/uapi/asm/kvm.h
>>> +++ b/arch/powerpc/include/uapi/asm/kvm.h
>>> @@ -27,6 +27,7 @@
>>> #define __KVM_HAVE_PPC_SMT
>>> #define __KVM_HAVE_IRQCHIP
>>> #define __KVM_HAVE_IRQ_LINE
>>> +#define __KVM_HAVE_GUEST_DEBUG
>>> 
>>> struct kvm_regs {
>>> 	__u64 pc;
>>> diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
>>> index 3e9fc1d..8be3502 100644
>>> --- a/arch/powerpc/kvm/booke.c
>>> +++ b/arch/powerpc/kvm/booke.c
>>> @@ -133,6 +133,29 @@ static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
>>> #endif
>>> }
>>> 
>>> +static void kvmppc_vcpu_sync_debug(struct kvm_vcpu *vcpu)
>>> +{
>>> +	/* Synchronize guest's desire to get debug interrupts into shadow MSR */
>>> +#ifndef CONFIG_KVM_BOOKE_HV
>>> +	vcpu->arch.shadow_msr &= ~MSR_DE;
>>> +	vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_DE;
>>> +#endif
>>> +
>>> +	/* Force enable debug interrupts when user space wants to debug */
>>> +	if (vcpu->guest_debug) {
>>> +#ifdef CONFIG_KVM_BOOKE_HV
>>> +		/*
>>> +		 * Since there is no shadow MSR, sync MSR_DE into the guest
>>> +		 * visible MSR.
>>> +		 */
>>> +		vcpu->arch.shared->msr |= MSR_DE;
>>> +#else
>>> +		vcpu->arch.shadow_msr |= MSR_DE;
>>> +		vcpu->arch.shared->msr &= ~MSR_DE;
>>> +#endif
>>> +	}
>>> +}
>>> +
>>> /*
>>> * Helper function for "full" MSR writes.  No need to call this if
>>> only
>>> * EE/CE/ME/DE/RI are changing.
>>> @@ -150,6 +173,7 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
>>> 	kvmppc_mmu_msr_notify(vcpu, old_msr);
>>> 	kvmppc_vcpu_sync_spe(vcpu);
>>> 	kvmppc_vcpu_sync_fpu(vcpu);
>>> +	kvmppc_vcpu_sync_debug(vcpu);
>>> }
>>> 
>>> static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
>>> @@ -655,6 +679,7 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
>>> int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
>>> {
>>> 	int ret, s;
>>> +	struct thread_struct thread;
>>> #ifdef CONFIG_PPC_FPU
>>> 	unsigned int fpscr;
>>> 	int fpexc_mode;
>>> @@ -698,12 +723,21 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
>>> 
>>> 	kvmppc_load_guest_fp(vcpu);
>>> #endif
>>> +	/* Switch to guest debug context */
>>> +	thread.debug = vcpu->arch.shadow_dbg_reg;
>>> +	switch_booke_debug_regs(&thread);
>>> +	thread.debug = current->thread.debug;
>>> +	current->thread.debug = vcpu->arch.shadow_dbg_reg;
>>> 
>>> 	ret = __kvmppc_vcpu_run(kvm_run, vcpu);
>>> 
>>> 	/* No need for kvm_guest_exit. It's done in handle_exit.
>>> 	   We also get here with interrupts enabled. */
>>> 
>>> +	/* Switch back to user space debug context */
>>> +	switch_booke_debug_regs(&thread);
>>> +	current->thread.debug = thread.debug;
>>> +
>>> #ifdef CONFIG_PPC_FPU
>>> 	kvmppc_save_guest_fp(vcpu);
>>> 
>>> @@ -759,6 +793,30 @@ static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
>>> 	}
>>> }
>>> 
>>> +static int kvmppc_handle_debug(struct kvm_run *run, struct kvm_vcpu *vcpu)
>>> +{
>>> +	struct debug_reg *dbg_reg = &(vcpu->arch.shadow_dbg_reg);
>>> +	u32 dbsr = vcpu->arch.dbsr;
>>> +
>>> +	run->debug.arch.status = 0;
>>> +	run->debug.arch.address = vcpu->arch.pc;
>>> +
>>> +	if (dbsr & (DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4)) {
>>> +		run->debug.arch.status |= KVMPPC_DEBUG_BREAKPOINT;
>>> +	} else {
>>> +		if (dbsr & (DBSR_DAC1W | DBSR_DAC2W))
>>> +			run->debug.arch.status |= KVMPPC_DEBUG_WATCH_WRITE;
>>> +		else if (dbsr & (DBSR_DAC1R | DBSR_DAC2R))
>>> +			run->debug.arch.status |= KVMPPC_DEBUG_WATCH_READ;
>>> +		if (dbsr & (DBSR_DAC1R | DBSR_DAC1W))
>>> +			run->debug.arch.address = dbg_reg->dac1;
>>> +		else if (dbsr & (DBSR_DAC2R | DBSR_DAC2W))
>>> +			run->debug.arch.address = dbg_reg->dac2;
>>> +	}
>>> +
>>> +	return RESUME_HOST;
>>> +}
>>> +
>>> static void kvmppc_fill_pt_regs(struct pt_regs *regs)
>>> {
>>> 	ulong r1, ip, msr, lr;
>>> @@ -819,6 +877,11 @@ static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
>>> 	case BOOKE_INTERRUPT_CRITICAL:
>>> 		unknown_exception(&regs);
>>> 		break;
>>> +	case BOOKE_INTERRUPT_DEBUG:
>>> +		/* Save DBSR before preemption is enabled */
>>> +		vcpu->arch.dbsr = mfspr(SPRN_DBSR);
>>> +		kvmppc_clear_dbsr();
>>> +		break;
>>> 	}
>>> }
>>> 
>>> @@ -1118,18 +1181,10 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
>>> 	}
>>> 
>>> 	case BOOKE_INTERRUPT_DEBUG: {
>>> -		u32 dbsr;
>>> -
>>> -		vcpu->arch.pc = mfspr(SPRN_CSRR0);
>>> -
>>> -		/* clear IAC events in DBSR register */
>>> -		dbsr = mfspr(SPRN_DBSR);
>>> -		dbsr &= DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4;
>>> -		mtspr(SPRN_DBSR, dbsr);
>>> -
>>> -		run->exit_reason = KVM_EXIT_DEBUG;
>>> +		r = kvmppc_handle_debug(run, vcpu);
>>> +		if (r == RESUME_HOST)
>>> +			run->exit_reason = KVM_EXIT_DEBUG;
>>> 		kvmppc_account_exit(vcpu, DEBUG_EXITS);
>>> -		r = RESUME_HOST;
>>> 		break;
>>> 	}
>>> 
>>> @@ -1180,7 +1235,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
>>> 	kvmppc_set_msr(vcpu, 0);
>>> 
>>> #ifndef CONFIG_KVM_BOOKE_HV
>>> -	vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS;
>>> +	vcpu->arch.shadow_msr = MSR_USER | MSR_IS | MSR_DS;
>>> 	vcpu->arch.shadow_pid = 1;
>>> 	vcpu->arch.shared->msr = 0;
>>> #endif
>>> @@ -1557,12 +1612,6 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
>>> 	return r;
>>> }
>>> 
>>> -int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
>>> -					 struct kvm_guest_debug *dbg)
>>> -{
>>> -	return -EINVAL;
>>> -}
>>> -
>>> int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
>>> {
>>> 	return -ENOTSUPP;
>>> @@ -1668,6 +1717,151 @@ void kvmppc_decrementer_func(unsigned long data)
>>> 	kvmppc_set_tsr_bits(vcpu, TSR_DIS);
>>> }
>>> 
>>> +static int kvmppc_booke_add_breakpoint(struct debug_reg *dbg_reg,
>>> +				       uint64_t addr, int index)
>>> +{
>>> +	switch (index) {
>>> +	case 0:
>>> +		dbg_reg->dbcr0 |= DBCR0_IAC1;
>>> +		dbg_reg->iac1 = addr;
>>> +		break;
>>> +	case 1:
>>> +		dbg_reg->dbcr0 |= DBCR0_IAC2;
>>> +		dbg_reg->iac2 = addr;
>>> +		break;
>>> +#if CONFIG_PPC_ADV_DEBUG_IACS > 2
>>> +	case 2:
>>> +		dbg_reg->dbcr0 |= DBCR0_IAC3;
>>> +		dbg_reg->iac3 = addr;
>>> +		break;
>>> +	case 3:
>>> +		dbg_reg->dbcr0 |= DBCR0_IAC4;
>>> +		dbg_reg->iac4 = addr;
>>> +		break;
>>> +#endif
>>> +	default:
>>> +		return -EINVAL;
>>> +	}
>>> +
>>> +	dbg_reg->dbcr0 |= DBCR0_IDM;
>>> +	return 0;
>>> +}
>>> +
>>> +static int kvmppc_booke_add_watchpoint(struct debug_reg *dbg_reg, uint64_t addr,
>>> +				       int type, int index)
>>> +{
>>> +	switch (index) {
>>> +	case 0:
>>> +		if (type & KVMPPC_DEBUG_WATCH_READ)
>>> +			dbg_reg->dbcr0 |= DBCR0_DAC1R;
>>> +		if (type & KVMPPC_DEBUG_WATCH_WRITE)
>>> +			dbg_reg->dbcr0 |= DBCR0_DAC1W;
>>> +		dbg_reg->dac1 = addr;
>>> +		break;
>>> +	case 1:
>>> +		if (type & KVMPPC_DEBUG_WATCH_READ)
>>> +			dbg_reg->dbcr0 |= DBCR0_DAC2R;
>>> +		if (type & KVMPPC_DEBUG_WATCH_WRITE)
>>> +			dbg_reg->dbcr0 |= DBCR0_DAC2W;
>>> +		dbg_reg->dac2 = addr;
>>> +		break;
>>> +	default:
>>> +		return -EINVAL;
>>> +	}
>>> +
>>> +	dbg_reg->dbcr0 |= DBCR0_IDM;
>>> +	return 0;
>>> +}
>>> +
>>> +#ifdef CONFIG_KVM_BOOKE_HV
>>> +void kvm_guest_may_change_msr_de(struct kvm_vcpu *vcpu, bool may_change)
>>> +{
>>> +	if (may_change)
>>> +		/* Allow guest to change MSR.DE */
>>> +		vcpu->arch.shadow_msrp &= ~MSRP_DEP;
>>> +	else
>>> +		/* Don't allow guest to change MSR.DE */
>>> +		vcpu->arch.shadow_msrp |= MSRP_DEP;
>>> +
>>> +}
>>> +#endif
>> 
>> Could you please provide this function regardless of CONFIG_KVM_BOOKE_HV? For
>> PR, just save a bool away somewhere and emulate the same behavior (guest may not
>> modify MSR.DE).
> 
> You mean modify kvmppc_set_msr() (kvmppc_vcpu_sync_debug()) so that it consults this bool when deciding whether to change MSR.DE?

Yep :).
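
Something along these lines (a rough, untested sketch; the vcpu->arch.msr_de_locked field is a name I'm making up here, it isn't in the patch):

#ifdef CONFIG_KVM_BOOKE_HV
void kvm_guest_may_change_msr_de(struct kvm_vcpu *vcpu, bool may_change)
{
	if (may_change)
		/* Allow guest to change MSR.DE */
		vcpu->arch.shadow_msrp &= ~MSRP_DEP;
	else
		/* Don't allow guest to change MSR.DE */
		vcpu->arch.shadow_msrp |= MSRP_DEP;
}
#else
void kvm_guest_may_change_msr_de(struct kvm_vcpu *vcpu, bool may_change)
{
	/* No MSRP on PR; just remember the decision for kvmppc_vcpu_sync_debug() */
	vcpu->arch.msr_de_locked = !may_change;
}
#endif

static void kvmppc_vcpu_sync_debug(struct kvm_vcpu *vcpu)
{
#ifndef CONFIG_KVM_BOOKE_HV
	/* Only propagate the guest's MSR.DE while the guest may own it */
	if (!vcpu->arch.msr_de_locked) {
		vcpu->arch.shadow_msr &= ~MSR_DE;
		vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_DE;
	}
#endif

	/* Force enable debug interrupts when user space wants to debug */
	if (vcpu->guest_debug) {
#ifdef CONFIG_KVM_BOOKE_HV
		vcpu->arch.shared->msr |= MSR_DE;
#else
		vcpu->arch.shadow_msr |= MSR_DE;
		vcpu->arch.shared->msr &= ~MSR_DE;
#endif
	}
}

kvmppc_set_msr() then picks this up through the existing kvmppc_vcpu_sync_debug() call.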


Alex
