On 12/13/2010 06:28 PM, Steven Rostedt wrote:
> On Mon, 2010-12-13 at 17:43 +0200, Avi Kivity wrote:
> > > What's your work flow? Do you
> > > load kvm modules after you start the trace, or are they always loaded?
> >
> > Loaded on boot.
>
> Via initramfs?
No, regular printks.
> > > Are the trace_printk's in the core kernel too, and not being printed?
> >
> > I don't have any trace_printk()s in the core kernel, only in modules.
> > Perhaps module initialization does not communicate trace_printk formats?
>
> They should. Could you send me a patch that has the trace_printk()s you
> are using.
Attached (with __trace_printk()s, which is what I used).

--
error compiling committee.c: too many arguments to function
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index d75ba1e..df86917 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1449,6 +1449,10 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	if (role.direct)
 		role.cr4_pae = 0;
 	role.access = access;
+	__trace_printk(_THIS_IP_,
+		       "base_role %x access %x role.access %x role %x\n",
+		       vcpu->arch.mmu.base_role, access, role.access,
+		       role.word);
 	if (!vcpu->arch.mmu.direct_map
 	    && vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
 		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
@@ -1576,6 +1580,11 @@ static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 	if (child->role.access == direct_access)
 		return;
 
+	__trace_printk(_THIS_IP_,
+		       "child->role %x child->role.access %x direct_access %x\n",
+		       child->role.word, child->role.access,
+		       direct_access);
+
 	mmu_page_remove_parent_pte(child, sptep);
 	__set_spte(sptep, shadow_trap_nonpresent_pte);
 	kvm_flush_remote_tlbs(vcpu->kvm);
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 4f61fbb..1049729 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -450,6 +450,8 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 	if (!is_present_gpte(gw->ptes[gw->level - 1]))
 		return NULL;
 
+	__trace_printk(_THIS_IP_, "pt_access %x pte_access %x dirty %d\n",
+		       gw->pt_access, gw->pte_access, dirty);
 	direct_access = gw->pt_access & gw->pte_access;
 	if (!dirty)
 		direct_access &= ~ACC_WRITE_MASK;
@@ -592,6 +594,9 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 	if (is_error_pfn(pfn))
 		return kvm_handle_bad_page(vcpu->kvm, walker.gfn, pfn);
 
+	__trace_printk(_THIS_IP_, "page_fault: map_writeable %x\n",
+		       map_writable);
+
 	spin_lock(&vcpu->kvm->mmu_lock);
 	if (mmu_notifier_retry(vcpu, mmu_seq))
 		goto out_unlock;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 83f5bf6..05481a3 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1015,6 +1015,8 @@ static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic,
 	if (unlikely(npages != 1) && !atomic) {
 		might_sleep();
+		__trace_printk(_THIS_IP_, "%s: addr %lx not writeable\n",
+			       __func__, addr);
 
 		if (writable)
 			*writable = write_fault;